diff --git a/.vscode/settings.json b/.vscode/settings.json
index 8fba11c48..56c99cef4 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,5 +1,5 @@
 {
-    "C_Cpp.default.compilerPath": ".cosmocc/3.7.1/bin/aarch64-linux-cosmo-c++",
+    "C_Cpp.default.compilerPath": ".cosmocc/3.8.0/bin/aarch64-linux-cosmo-c++",
     "C_Cpp.default.compilerArgs": [
         "-nostdinc",
         "-nostdlib",
@@ -33,4 +33,4 @@
     "files.associations": {
         "log.h": "c"
     }
-}
\ No newline at end of file
+}
diff --git a/Makefile b/Makefile
index 415eb36f8..5b3184792 100644
--- a/Makefile
+++ b/Makefile
@@ -147,10 +147,10 @@ export MODE
 export SOURCE_DATE_EPOCH
 export TMPDIR
 
-COSMOCC = .cosmocc/3.7.1
+COSMOCC = .cosmocc/3.8.0
 BOOTSTRAP = $(COSMOCC)/bin
 TOOLCHAIN = $(COSMOCC)/bin/$(ARCH)-linux-cosmo-
-DOWNLOAD := $(shell build/download-cosmocc.sh $(COSMOCC) 3.7.1 13b65b0e659b493bd82f3d0a319d0265d66f849839e484aa2a54191024711e85)
+DOWNLOAD := $(shell build/download-cosmocc.sh $(COSMOCC) 3.8.0 813c6b2f95062d2e0a845307a79505424cb98cb038e8013334f8a22e3b92a474)
 
 IGNORE := $(shell $(MKDIR) $(TMPDIR))
 
diff --git a/ape/aarch64.lds b/ape/aarch64.lds
index 356ff3ae7..0a232a2da 100644
--- a/ape/aarch64.lds
+++ b/ape/aarch64.lds
@@ -103,10 +103,8 @@ SECTIONS {
     *(.eh_frame_entry .eh_frame_entry.*)
   }
 
-  .eh_frame : ONLY_IF_RO {
-    KEEP(*(.eh_frame))
-    *(.eh_frame.*)
-  }
+  __eh_frame_hdr_start = SIZEOF(.eh_frame_hdr) > 0 ? ADDR(.eh_frame_hdr) : 0;
+  __eh_frame_hdr_end = SIZEOF(.eh_frame_hdr) > 0 ? . : 0;
 
   .gcc_except_table : ONLY_IF_RO {
     *(.gcc_except_table .gcc_except_table.*)
@@ -127,9 +125,11 @@ SECTIONS {
   . += CONSTANT(MAXPAGESIZE);
   . = DATA_SEGMENT_ALIGN(CONSTANT(MAXPAGESIZE), CONSTANT(COMMONPAGESIZE));
 
-  .eh_frame : ONLY_IF_RW {
+  .eh_frame : {
+    __eh_frame_start = .;
     KEEP(*(.eh_frame))
     *(.eh_frame.*)
+    __eh_frame_end = .;
   }
 
   .gnu_extab : ONLY_IF_RW {
diff --git a/ape/ape.lds b/ape/ape.lds
index 4bf0f0fd8..ec63ae7d5 100644
--- a/ape/ape.lds
+++ b/ape/ape.lds
@@ -329,6 +329,10 @@ SECTIONS {
     *(.ubsan.types)
     *(.ubsan.data)
 
+    __eh_frame_hdr_start_actual = .;
+    *(.eh_frame_hdr)
+    __eh_frame_hdr_end_actual = .;
+
     /* Legal Notices */
     __notices = .;
     KEEP(*(.notice))
@@ -422,6 +426,11 @@ SECTIONS {
     KEEP(*(.dtors))
     __fini_array_end = .;
 
+    __eh_frame_start = .;
+    KEEP(*(.eh_frame))
+    *(.eh_frame.*)
+    __eh_frame_end = .;
+
     /*BEGIN: Post-Initialization Read-Only */
     . = ALIGN(. != 0 ? __SIZEOF_POINTER__ : 0);
     KEEP(*(SORT_BY_NAME(.piro.relo.sort.*)))
@@ -601,6 +610,9 @@ ape_text_memsz = ape_text_filesz;
 ape_text_align = CONSTANT(COMMONPAGESIZE);
 ape_text_rva = RVA(ape_text_vaddr);
 
+__eh_frame_hdr_start = __eh_frame_hdr_end_actual > __eh_frame_hdr_start_actual ? __eh_frame_hdr_start_actual : 0;
+__eh_frame_hdr_end = __eh_frame_hdr_end_actual > __eh_frame_hdr_start_actual ? __eh_frame_hdr_end_actual : 0;
+
 /* we roundup here because xnu wants the file load segments page-aligned */
 /* but we don't want to add the nop padding to the ape program, so we'll */
 /* let ape.S dd read past the end of the file into the wrapping binaries */
diff --git a/build/definitions.mk b/build/definitions.mk
index 703a5c381..6682f79b7 100644
--- a/build/definitions.mk
+++ b/build/definitions.mk
@@ -92,10 +92,7 @@ DEFAULT_COPTS ?= \
 	-fno-gnu-unique \
 	-fstrict-aliasing \
 	-fstrict-overflow \
-	-fno-semantic-interposition \
-	-fno-dwarf2-cfi-asm \
-	-fno-unwind-tables \
-	-fno-asynchronous-unwind-tables
+	-fno-semantic-interposition
 
 ifeq ($(ARCH), x86_64)
 # Microsoft says "[a]ny memory below the stack beyond the red zone
@@ -139,8 +136,6 @@ DEFAULT_CFLAGS = \
 
 DEFAULT_CXXFLAGS = \
 	-std=gnu++23 \
-	-fno-rtti \
-	-fno-exceptions \
 	-fuse-cxa-atexit \
 	-Wno-int-in-bool-context \
 	-Wno-narrowing \
diff --git a/build/objdump b/build/objdump
index 3683e9bc8..b49667976 100755
--- a/build/objdump
+++ b/build/objdump
@@ -6,14 +6,14 @@ if [ -n "$OBJDUMP" ]; then
 fi
 
 find_objdump() {
-  if [ -x .cosmocc/3.6.0/bin/$1-linux-cosmo-objdump ]; then
-    OBJDUMP=.cosmocc/3.6.0/bin/$1-linux-cosmo-objdump
-  elif [ -x .cosmocc/3.6.0/bin/$1-linux-musl-objdump ]; then
-    OBJDUMP=.cosmocc/3.6.0/bin/$1-linux-musl-objdump
-  elif [ -x "$COSMO/.cosmocc/3.6.0/bin/$1-linux-cosmo-objdump" ]; then
-    OBJDUMP="$COSMO/.cosmocc/3.6.0/bin/$1-linux-cosmo-objdump"
-  elif [ -x "$COSMO/.cosmocc/3.6.0/bin/$1-linux-musl-objdump" ]; then
-    OBJDUMP="$COSMO/.cosmocc/3.6.0/bin/$1-linux-musl-objdump"
+  if [ -x .cosmocc/3.8.0/bin/$1-linux-cosmo-objdump ]; then
+    OBJDUMP=.cosmocc/3.8.0/bin/$1-linux-cosmo-objdump
+  elif [ -x .cosmocc/3.8.0/bin/$1-linux-musl-objdump ]; then
+    OBJDUMP=.cosmocc/3.8.0/bin/$1-linux-musl-objdump
+  elif [ -x "$COSMO/.cosmocc/3.8.0/bin/$1-linux-cosmo-objdump" ]; then
+    OBJDUMP="$COSMO/.cosmocc/3.8.0/bin/$1-linux-cosmo-objdump"
+  elif [ -x "$COSMO/.cosmocc/3.8.0/bin/$1-linux-musl-objdump" ]; then
+    OBJDUMP="$COSMO/.cosmocc/3.8.0/bin/$1-linux-musl-objdump"
   else
     echo "error: toolchain not found (try running 'cosmocc --update' or 'make' in the cosmo monorepo)" >&2
     exit 1
diff --git a/ctl/set.h b/ctl/set.h
index cc951b98c..2216ca851 100644
--- a/ctl/set.h
+++ b/ctl/set.h
@@ -241,8 +241,9 @@ class set
       private:
         friend class set;
         node_type* node_;
+        node_type* root_;
 
-        explicit reverse_iterator(node_type* node) : node_(node)
+        explicit reverse_iterator(node_type* node, node_type* root) : node_(node), root_(root)
         {
         }
     };
@@ -347,17 +348,17 @@ class set
 
     reverse_iterator rbegin()
     {
-        return reverse_iterator(rightmost(root_));
+        return reverse_iterator(rightmost(root_), root_);
     }
 
     const_reverse_iterator rbegin() const
    {
-        return const_reverse_iterator(rightmost(root_));
+        return const_reverse_iterator(rightmost(root_), root_);
     }
 
     const_reverse_iterator crbegin() const
     {
-        return const_reverse_iterator(rightmost(root_));
+        return const_reverse_iterator(rightmost(root_), root_);
     }
 
     iterator end() noexcept
@@ -377,17 +378,17 @@ class set
 
     reverse_iterator rend()
     {
-        return reverse_iterator(nullptr);
+        return reverse_iterator(nullptr, root_);
     }
 
     const_reverse_iterator rend() const
     {
-        return const_reverse_iterator(nullptr);
+        return const_reverse_iterator(nullptr, root_);
     }
 
     const_reverse_iterator crend() const
     {
-        return const_reverse_iterator(nullptr);
+        return const_reverse_iterator(nullptr, root_);
     }
 
     void clear() noexcept
diff --git a/examples/BUILD.mk b/examples/BUILD.mk
index e0318d0a7..10eeaff6b 100644
--- a/examples/BUILD.mk
+++
b/examples/BUILD.mk @@ -94,6 +94,7 @@ EXAMPLES_DIRECTDEPS = \ THIRD_PARTY_VQSORT \ THIRD_PARTY_XED \ THIRD_PARTY_LIBCXXABI \ + THIRD_PARTY_LIBUNWIND \ THIRD_PARTY_ZLIB \ TOOL_ARGS \ TOOL_BUILD_LIB \ diff --git a/libc/integral/normalize.inc b/libc/integral/normalize.inc index 3edf8c66e..6a1969d51 100644 --- a/libc/integral/normalize.inc +++ b/libc/integral/normalize.inc @@ -3,8 +3,8 @@ #endif #define __COSMOPOLITAN_MAJOR__ 3 -#define __COSMOPOLITAN_MINOR__ 7 -#define __COSMOPOLITAN_PATCH__ 1 +#define __COSMOPOLITAN_MINOR__ 8 +#define __COSMOPOLITAN_PATCH__ 0 #define __COSMOPOLITAN__ \ (100000000 * __COSMOPOLITAN_MAJOR__ + 1000000 * __COSMOPOLITAN_MINOR__ + \ __COSMOPOLITAN_PATCH__) @@ -93,6 +93,30 @@ #include "libc/integral/llp64.inc" #endif +#undef __INT_FAST16_MAX__ +#undef __INT_FAST16_TYPE__ +#undef __UINT_FAST16_MAX__ +#undef __INT_FAST16_WIDTH__ +#undef __UINT_FAST16_TYPE__ + +#define __INT_FAST16_MAX__ 2147483647 +#define __INT_FAST16_TYPE__ int +#define __UINT_FAST16_MAX__ 4294967295U +#define __INT_FAST16_WIDTH__ 32 +#define __UINT_FAST16_TYPE__ unsigned int + +#undef __INT_FAST32_MAX__ +#undef __INT_FAST32_TYPE__ +#undef __UINT_FAST32_MAX__ +#undef __INT_FAST32_WIDTH__ +#undef __UINT_FAST32_TYPE__ + +#define __INT_FAST32_MAX__ 2147483647 +#define __INT_FAST32_TYPE__ int +#define __UINT_FAST32_MAX__ 4294967295U +#define __INT_FAST32_WIDTH__ 32 +#define __UINT_FAST32_TYPE__ unsigned int + #if !(__ASSEMBLER__ + __LINKER__ + 0) #ifdef __STDC__ #include "libc/integral/c.inc" diff --git a/libc/intrin/personality.c b/libc/intrin/personality.c new file mode 100644 index 000000000..de4dce7a5 --- /dev/null +++ b/libc/intrin/personality.c @@ -0,0 +1,22 @@ +/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ +│ vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi │ +╞══════════════════════════════════════════════════════════════════════════════╡ +│ Copyright 2024 Justine Alexandra Roberts Tunney │ +│ │ +│ Permission to use, copy, modify, and/or distribute this software for │ +│ any purpose with or without fee is hereby granted, provided that the │ +│ above copyright notice and this permission notice appear in all copies. │ +│ │ +│ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ +│ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ +│ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ +│ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ +│ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ +│ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ +│ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ +│ PERFORMANCE OF THIS SOFTWARE. 
│ +╚─────────────────────────────────────────────────────────────────────────────*/ + +__attribute__((__weak__)) void __gxx_personality_v0() { + __builtin_trap(); +} diff --git a/libc/isystem/ammintrin.h b/libc/isystem/ammintrin.h index 028098a89..c29f7c84a 100644 --- a/libc/isystem/ammintrin.h +++ b/libc/isystem/ammintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_AMMINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_AMMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/ammintrin.h" +#else #include "third_party/intel/ammintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_AMMINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/amxcomplexintrin.h b/libc/isystem/amxcomplexintrin.h index b6b9ea7d3..be8122bd3 100644 --- a/libc/isystem/amxcomplexintrin.h +++ b/libc/isystem/amxcomplexintrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/amxcomplexintrin.h" +#else #include "third_party/intel/amxcomplexintrin.internal.h" +#endif diff --git a/libc/isystem/amxfp16intrin.h b/libc/isystem/amxfp16intrin.h index 6b4043496..eb25dfc70 100644 --- a/libc/isystem/amxfp16intrin.h +++ b/libc/isystem/amxfp16intrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/amxfp16intrin.h" +#else #include "third_party/intel/amxfp16intrin.internal.h" +#endif diff --git a/libc/isystem/arm_acle.h b/libc/isystem/arm_acle.h index 5e695146a..70d0a1ed2 100644 --- a/libc/isystem/arm_acle.h +++ b/libc/isystem/arm_acle.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_ARM_ACLE_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_ARM_ACLE_H_ +#ifdef __clang__ +#include "third_party/aarch64/clang/arm_acle.h" +#else #include "third_party/aarch64/arm_acle.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_ARM_ACLE_H_ */ diff --git a/libc/isystem/arm_bf16.h b/libc/isystem/arm_bf16.h index 8177f26c0..bbfdfe6ba 100644 --- a/libc/isystem/arm_bf16.h +++ b/libc/isystem/arm_bf16.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_ARM_BF16_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_ARM_BF16_H_ +#ifdef __clang__ +#include "third_party/aarch64/clang/arm_bf16.h" +#else #include "third_party/aarch64/arm_bf16.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_ARM_BF16_H_ */ diff --git a/libc/isystem/arm_fp16.h b/libc/isystem/arm_fp16.h index 2df9b91a2..5c269e35d 100644 --- a/libc/isystem/arm_fp16.h +++ b/libc/isystem/arm_fp16.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_ARM_FP16_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_ARM_FP16_H_ +#ifdef __clang__ +#include "third_party/aarch64/clang/arm_fp16.h" +#else #include "third_party/aarch64/arm_fp16.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_ARM_FP16_H_ */ diff --git a/libc/isystem/arm_neon.h b/libc/isystem/arm_neon.h index c59b01ae6..6beff8834 100644 --- a/libc/isystem/arm_neon.h +++ b/libc/isystem/arm_neon.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_ARM_NEON_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_ARM_NEON_H_ +#ifdef __clang__ +#include "third_party/aarch64/clang/arm_neon.h" +#else #include "third_party/aarch64/arm_neon.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_ARM_NEON_H_ */ diff --git a/libc/isystem/arm_sve.h b/libc/isystem/arm_sve.h new file mode 100644 index 000000000..2e8fc6d18 --- /dev/null +++ b/libc/isystem/arm_sve.h @@ -0,0 +1,8 @@ +#ifndef COSMOPOLITAN_LIBC_ISYSTEM_ARM_SVE_H_ +#define COSMOPOLITAN_LIBC_ISYSTEM_ARM_SVE_H_ +#ifdef __clang__ +#include "third_party/aarch64/clang/arm_sve.h" +#else +#include "third_party/aarch64/arm_sve.internal.h" +#endif +#endif /* 
COSMOPOLITAN_LIBC_ISYSTEM_ARM_SVE_H_ */ diff --git a/libc/isystem/arm_vector_types.h b/libc/isystem/arm_vector_types.h new file mode 100644 index 000000000..b2018e69d --- /dev/null +++ b/libc/isystem/arm_vector_types.h @@ -0,0 +1,8 @@ +#ifndef COSMOPOLITAN_LIBC_ISYSTEM_ARM_VECTOR_TYPES_H_ +#define COSMOPOLITAN_LIBC_ISYSTEM_ARM_VECTOR_TYPES_H_ +#ifdef __clang__ +#include "third_party/aarch64/clang/arm_vector_types.h" +#else +#include "third_party/aarch64/arm_vector_types.internal.h" +#endif +#endif /* COSMOPOLITAN_LIBC_ISYSTEM_ARM_VECTOR_TYPES_H_ */ diff --git a/libc/isystem/avxifmaintrin.h b/libc/isystem/avxifmaintrin.h index a93835f7e..8b94c5d8e 100644 --- a/libc/isystem/avxifmaintrin.h +++ b/libc/isystem/avxifmaintrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/avxifmaintrin.h" +#else #include "third_party/intel/avxifmaintrin.internal.h" +#endif diff --git a/libc/isystem/avxneconvertintrin.h b/libc/isystem/avxneconvertintrin.h index 691504600..fac905bc6 100644 --- a/libc/isystem/avxneconvertintrin.h +++ b/libc/isystem/avxneconvertintrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/avxneconvertintrin.h" +#else #include "third_party/intel/avxneconvertintrin.internal.h" +#endif diff --git a/libc/isystem/avxvnniint16intrin.h b/libc/isystem/avxvnniint16intrin.h index fc8c6d0ab..b7cce37b7 100644 --- a/libc/isystem/avxvnniint16intrin.h +++ b/libc/isystem/avxvnniint16intrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/avxvnniint16intrin.h" +#else #include "third_party/intel/avxvnniint16intrin.internal.h" +#endif diff --git a/libc/isystem/avxvnniint8intrin.h b/libc/isystem/avxvnniint8intrin.h index ccb746b56..e760b646d 100644 --- a/libc/isystem/avxvnniint8intrin.h +++ b/libc/isystem/avxvnniint8intrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/avxvnniint8intrin.h" +#else #include "third_party/intel/avxvnniint8intrin.internal.h" +#endif diff --git a/libc/isystem/clzerointrin.h b/libc/isystem/clzerointrin.h index 5c0be5400..5e9b053d6 100644 --- a/libc/isystem/clzerointrin.h +++ b/libc/isystem/clzerointrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_CLZEROINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_CLZEROINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/clzerointrin.h" +#else #include "third_party/intel/clzerointrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_CLZEROINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/cmpccxaddintrin.h b/libc/isystem/cmpccxaddintrin.h index 48fd2c5db..10f4e9b7e 100644 --- a/libc/isystem/cmpccxaddintrin.h +++ b/libc/isystem/cmpccxaddintrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/cmpccxaddintrin.h" +#else #include "third_party/intel/cmpccxaddintrin.internal.h" +#endif diff --git a/libc/isystem/emmintrin.h b/libc/isystem/emmintrin.h index 1c670b16a..5123aa712 100644 --- a/libc/isystem/emmintrin.h +++ b/libc/isystem/emmintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_EMMINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_EMMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/emmintrin.h" +#else #include "third_party/intel/emmintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_EMMINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/immintrin.h b/libc/isystem/immintrin.h index 683eb5a7a..72cb67c80 100644 --- a/libc/isystem/immintrin.h +++ b/libc/isystem/immintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_IMMINTRIN_INTERNAL_H_ #define 
COSMOPOLITAN_LIBC_ISYSTEM_IMMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/immintrin.h" +#else #include "third_party/intel/immintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_IMMINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/mm_malloc.h b/libc/isystem/mm_malloc.h index 7634fa6de..a81913524 100644 --- a/libc/isystem/mm_malloc.h +++ b/libc/isystem/mm_malloc.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_MM_MALLOC_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_MM_MALLOC_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/mm_malloc.h" +#else #include "third_party/intel/mm_malloc.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_MM_MALLOC_INTERNAL_H_ */ diff --git a/libc/isystem/mmintrin.h b/libc/isystem/mmintrin.h index af089e7c6..f4fbbe9d3 100644 --- a/libc/isystem/mmintrin.h +++ b/libc/isystem/mmintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_MMINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_MMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/mmintrin.h" +#else #include "third_party/intel/mmintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_MMINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/mwaitxintrin.h b/libc/isystem/mwaitxintrin.h index 42a5f3e72..aa5d8ef88 100644 --- a/libc/isystem/mwaitxintrin.h +++ b/libc/isystem/mwaitxintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_MWAITXINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_MWAITXINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/mwaitxintrin.h" +#else #include "third_party/intel/mwaitxintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_MWAITXINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/nmmintrin.h b/libc/isystem/nmmintrin.h index 0a5ef7c98..f2fcea020 100644 --- a/libc/isystem/nmmintrin.h +++ b/libc/isystem/nmmintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_NMMINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_NMMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/nmmintrin.h" +#else #include "third_party/intel/nmmintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_NMMINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/pmmintrin.h b/libc/isystem/pmmintrin.h index 21e098b7c..5c557dc55 100644 --- a/libc/isystem/pmmintrin.h +++ b/libc/isystem/pmmintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_PMMINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_PMMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/pmmintrin.h" +#else #include "third_party/intel/pmmintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_PMMINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/popcntintrin.h b/libc/isystem/popcntintrin.h index 632667eb0..9583b31e4 100644 --- a/libc/isystem/popcntintrin.h +++ b/libc/isystem/popcntintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_POPCNTINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_POPCNTINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/popcntintrin.h" +#else #include "third_party/intel/popcntintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_POPCNTINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/prfchiintrin.h b/libc/isystem/prfchiintrin.h index f76698468..059e1e0db 100644 --- a/libc/isystem/prfchiintrin.h +++ b/libc/isystem/prfchiintrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/prfchiintrin.h" +#else #include "third_party/intel/prfchiintrin.internal.h" +#endif diff --git a/libc/isystem/raointintrin.h 
b/libc/isystem/raointintrin.h index 4f41b106a..e9486ee29 100644 --- a/libc/isystem/raointintrin.h +++ b/libc/isystem/raointintrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/raointintrin.h" +#else #include "third_party/intel/raointintrin.internal.h" +#endif diff --git a/libc/isystem/sgxintrin.h b/libc/isystem/sgxintrin.h index 0ba872436..e44b52753 100644 --- a/libc/isystem/sgxintrin.h +++ b/libc/isystem/sgxintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_SGXINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_SGXINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/sgxintrin.h" +#else #include "third_party/intel/sgxintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_SGXINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/sha512intrin.h b/libc/isystem/sha512intrin.h index f364a7e5f..1b1ed0d39 100644 --- a/libc/isystem/sha512intrin.h +++ b/libc/isystem/sha512intrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/sha512intrin.h" +#else #include "third_party/intel/sha512intrin.internal.h" +#endif diff --git a/libc/isystem/sm3intrin.h b/libc/isystem/sm3intrin.h index 2f35eeba6..80271fb9b 100644 --- a/libc/isystem/sm3intrin.h +++ b/libc/isystem/sm3intrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/sm3intrin.h" +#else #include "third_party/intel/sm3intrin.internal.h" +#endif diff --git a/libc/isystem/sm4intrin.h b/libc/isystem/sm4intrin.h index 91edac356..2c9100603 100644 --- a/libc/isystem/sm4intrin.h +++ b/libc/isystem/sm4intrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/sm4intrin.h" +#else #include "third_party/intel/sm4intrin.internal.h" +#endif diff --git a/libc/isystem/smmintrin.h b/libc/isystem/smmintrin.h index fd7d9b648..4fdb44f60 100644 --- a/libc/isystem/smmintrin.h +++ b/libc/isystem/smmintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_SMMINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_SMMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/smmintrin.h" +#else #include "third_party/intel/smmintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_SMMINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/tmmintrin.h b/libc/isystem/tmmintrin.h index d1279467e..952e63841 100644 --- a/libc/isystem/tmmintrin.h +++ b/libc/isystem/tmmintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_TMMINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_TMMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/tmmintrin.h" +#else #include "third_party/intel/tmmintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_TMMINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/usermsrintrin.h b/libc/isystem/usermsrintrin.h index 85a8d8130..d996e157c 100644 --- a/libc/isystem/usermsrintrin.h +++ b/libc/isystem/usermsrintrin.h @@ -1 +1,5 @@ +#ifdef __clang__ +#include "third_party/intel/clang/usermsrintrin.h" +#else #include "third_party/intel/usermsrintrin.internal.h" +#endif diff --git a/libc/isystem/wmmintrin.h b/libc/isystem/wmmintrin.h index 8c4f60e00..15ed4a9fe 100644 --- a/libc/isystem/wmmintrin.h +++ b/libc/isystem/wmmintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_WMMINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_WMMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/wmmintrin.h" +#else #include "third_party/intel/wmmintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_WMMINTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/x86intrin.h b/libc/isystem/x86intrin.h index 
fb8c3f971..da763450b 100644 --- a/libc/isystem/x86intrin.h +++ b/libc/isystem/x86intrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_X86INTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_X86INTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/x86intrin.h" +#else #include "third_party/intel/x86intrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_X86INTRIN_INTERNAL_H_ */ diff --git a/libc/isystem/xmmintrin.h b/libc/isystem/xmmintrin.h index 594e650fd..3f528bcb3 100644 --- a/libc/isystem/xmmintrin.h +++ b/libc/isystem/xmmintrin.h @@ -1,4 +1,8 @@ #ifndef COSMOPOLITAN_LIBC_ISYSTEM_XMMINTRIN_INTERNAL_H_ #define COSMOPOLITAN_LIBC_ISYSTEM_XMMINTRIN_INTERNAL_H_ +#ifdef __clang__ +#include "third_party/intel/clang/xmmintrin.h" +#else #include "third_party/intel/xmmintrin.internal.h" +#endif #endif /* COSMOPOLITAN_LIBC_ISYSTEM_XMMINTRIN_INTERNAL_H_ */ diff --git a/libc/mem/BUILD.mk b/libc/mem/BUILD.mk index 438837a7a..52f2ff9f4 100644 --- a/libc/mem/BUILD.mk +++ b/libc/mem/BUILD.mk @@ -42,7 +42,8 @@ $(LIBC_MEM_A_OBJS): private \ COPTS += \ -fno-sanitize=all \ -Wframe-larger-than=4096 \ - -Walloca-larger-than=4096 + -Walloca-larger-than=4096 \ + -fexceptions o/$(MODE)/libc/mem/asan.o: private \ CFLAGS += \ diff --git a/libc/mem/alg.h b/libc/mem/alg.h index f2824c8ba..c9bdb8f53 100644 --- a/libc/mem/alg.h +++ b/libc/mem/alg.h @@ -7,10 +7,10 @@ void *bsearch(const void *, const void *, size_t, size_t, void *bsearch_r(const void *, const void *, size_t, size_t, int (*)(const void *, const void *, void *), void *) paramsnonnull((1, 2, 5)) nosideeffect; -void qsort3(void *, size_t, size_t, - int (*)(const void *, const void *)) libcesque paramsnonnull(); -void qsort(void *, size_t, size_t, - int (*)(const void *, const void *)) libcesque paramsnonnull(); +void qsort3(void *, size_t, size_t, int (*)(const void *, const void *)) + paramsnonnull(); +void qsort(void *, size_t, size_t, int (*)(const void *, const void *)) + paramsnonnull(); void qsort_r(void *, size_t, size_t, int (*)(const void *, const void *, void *), void *) paramsnonnull((1, 4)); diff --git a/test/libc/tinymath/BUILD.mk b/test/libc/tinymath/BUILD.mk index 7431bb4eb..4b304dc67 100644 --- a/test/libc/tinymath/BUILD.mk +++ b/test/libc/tinymath/BUILD.mk @@ -48,6 +48,8 @@ TEST_LIBC_TINYMATH_DIRECTDEPS = \ THIRD_PARTY_DOUBLECONVERSION \ THIRD_PARTY_GDTOA \ THIRD_PARTY_LIBCXX \ + THIRD_PARTY_LIBCXXABI \ + THIRD_PARTY_LIBUNWIND \ TEST_LIBC_TINYMATH_DEPS := \ $(call uniq,$(foreach x,$(TEST_LIBC_TINYMATH_DIRECTDEPS),$($(x)))) diff --git a/test/libcxx/cexception_test.cc b/test/libcxx/cexception_test.cc index 0e09c014b..28aa20d48 100644 --- a/test/libcxx/cexception_test.cc +++ b/test/libcxx/cexception_test.cc @@ -21,13 +21,6 @@ #include "libc/mem/mem.h" #include "libc/runtime/runtime.h" -// this dontthrow keyword SHOULD break this test. it's probably passing -// because we're currently using SjLj exceptions. 
the day we can change -// things, remove `dontthrow` and this test will still be a useful help -extern "C" dontthrow void qsort_(void *, size_t, size_t, - int (*)(const void *, - const void *)) asm("qsort"); - struct Resource { char *p; Resource() { @@ -60,7 +53,7 @@ int A[3] = {3, 2, 1}; int Work(void) { Resource r; pPoke(r.p); - qsort_(A, 3, sizeof(int), cmp); + qsort(A, 3, sizeof(int), cmp); return A[0]; } int (*pWork)(void) = Work; diff --git a/third_party/aarch64/BUILD.mk b/third_party/aarch64/BUILD.mk index 7ed5ff404..ffff966a9 100644 --- a/third_party/aarch64/BUILD.mk +++ b/third_party/aarch64/BUILD.mk @@ -3,4 +3,4 @@ PKGS += THIRD_PARTY_AARCH64 THIRD_PARTY_AARCH64_HDRS = $(filter %.h,$(THIRD_PARTY_AARCH64_FILES)) -THIRD_PARTY_AARCH64_FILES := $(wildcard third_party/aarch64/*) +THIRD_PARTY_AARCH64_FILES := $(wildcard third_party/aarch64/*) $(wildcard third_party/aarch64/clang/*) diff --git a/third_party/aarch64/clang/arm64intr.h b/third_party/aarch64/clang/arm64intr.h new file mode 100644 index 000000000..4943b2db6 --- /dev/null +++ b/third_party/aarch64/clang/arm64intr.h @@ -0,0 +1,35 @@ +/*===---- arm64intr.h - ARM64 Windows intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +/* Only include this if we're compiling for the windows platform. */ +#ifndef _MSC_VER +#include_next +#else + +#ifndef __ARM64INTR_H +#define __ARM64INTR_H + +typedef enum +{ + _ARM64_BARRIER_SY = 0xF, + _ARM64_BARRIER_ST = 0xE, + _ARM64_BARRIER_LD = 0xD, + _ARM64_BARRIER_ISH = 0xB, + _ARM64_BARRIER_ISHST = 0xA, + _ARM64_BARRIER_ISHLD = 0x9, + _ARM64_BARRIER_NSH = 0x7, + _ARM64_BARRIER_NSHST = 0x6, + _ARM64_BARRIER_NSHLD = 0x5, + _ARM64_BARRIER_OSH = 0x3, + _ARM64_BARRIER_OSHST = 0x2, + _ARM64_BARRIER_OSHLD = 0x1 +} _ARM64INTR_BARRIER_TYPE; + +#endif /* __ARM64INTR_H */ +#endif /* _MSC_VER */ diff --git a/third_party/aarch64/clang/arm_acle.h b/third_party/aarch64/clang/arm_acle.h new file mode 100644 index 000000000..1518b0c4c --- /dev/null +++ b/third_party/aarch64/clang/arm_acle.h @@ -0,0 +1,888 @@ +/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + * The Arm C Language Extensions specifications can be found in the following + * link: https://github.com/ARM-software/acle/releases + * + * The ACLE section numbers are subject to change. When consulting the + * specifications, it is recommended to search using section titles if + * the section numbers look outdated. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_ACLE_H +#define __ARM_ACLE_H + +#ifndef __ARM_ACLE +#error "ACLE intrinsics support not enabled." 
+#endif + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +/* 7 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */ +/* 7.3 Memory barriers */ +#if !__has_builtin(__dmb) +#define __dmb(i) __builtin_arm_dmb(i) +#endif +#if !__has_builtin(__dsb) +#define __dsb(i) __builtin_arm_dsb(i) +#endif +#if !__has_builtin(__isb) +#define __isb(i) __builtin_arm_isb(i) +#endif + +/* 7.4 Hints */ + +#if !__has_builtin(__wfi) +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) { + __builtin_arm_wfi(); +} +#endif + +#if !__has_builtin(__wfe) +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) { + __builtin_arm_wfe(); +} +#endif + +#if !__has_builtin(__sev) +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) { + __builtin_arm_sev(); +} +#endif + +#if !__has_builtin(__sevl) +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) { + __builtin_arm_sevl(); +} +#endif + +#if !__has_builtin(__yield) +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) { + __builtin_arm_yield(); +} +#endif + +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE +#define __dbg(t) __builtin_arm_dbg(t) +#endif + +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +#define _CHKFEAT_GCS 1 +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) +__chkfeat(uint64_t __features) { + return __builtin_arm_chkfeat(__features) ^ __features; +} +#endif + +/* 7.5 Swap */ +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__swp(uint32_t __x, volatile uint32_t *__p) { + uint32_t v; + do + v = __builtin_arm_ldrex(__p); + while (__builtin_arm_strex(__x, __p)); + return v; +} + +/* 7.6 Memory prefetch intrinsics */ +/* 7.6.1 Data prefetch */ +#define __pld(addr) __pldx(0, 0, 0, addr) + +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE +#define __pldx(access_kind, cache_level, retention_policy, addr) \ + __builtin_arm_prefetch(addr, access_kind, 1) +#else +#define __pldx(access_kind, cache_level, retention_policy, addr) \ + __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1) +#endif + +/* 7.6.2 Instruction prefetch */ +#define __pli(addr) __plix(0, 0, addr) + +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE +#define __plix(cache_level, retention_policy, addr) \ + __builtin_arm_prefetch(addr, 0, 0) +#else +#define __plix(cache_level, retention_policy, addr) \ + __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0) +#endif + +/* 7.7 NOP */ +#if !defined(_MSC_VER) || (!defined(__aarch64__) && !defined(__arm64ec__)) +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) { + __builtin_arm_nop(); +} +#endif + +/* 8 DATA-PROCESSING INTRINSICS */ +/* 8.2 Miscellaneous data-processing intrinsics */ +/* ROR */ +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__ror(uint32_t __x, uint32_t __y) { + __y %= 32; + if (__y == 0) + return __x; + return (__x >> __y) | (__x << (32 - __y)); +} + +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) +__rorll(uint64_t __x, uint32_t __y) { + __y %= 64; + if (__y == 0) + return __x; + return (__x >> __y) | (__x << (64 - __y)); +} + +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) +__rorl(unsigned long __x, uint32_t __y) { +#if __SIZEOF_LONG__ == 4 + return __ror(__x, __y); +#else + return __rorll(__x, __y); +#endif +} + + +/* CLZ */ +static __inline__ unsigned int 
__attribute__((__always_inline__, __nodebug__)) +__clz(uint32_t __t) { + return __builtin_arm_clz(__t); +} + +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) +__clzl(unsigned long __t) { +#if __SIZEOF_LONG__ == 4 + return __builtin_arm_clz(__t); +#else + return __builtin_arm_clz64(__t); +#endif +} + +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) +__clzll(uint64_t __t) { + return __builtin_arm_clz64(__t); +} + +/* CLS */ +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) +__cls(uint32_t __t) { + return __builtin_arm_cls(__t); +} + +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) +__clsl(unsigned long __t) { +#if __SIZEOF_LONG__ == 4 + return __builtin_arm_cls(__t); +#else + return __builtin_arm_cls64(__t); +#endif +} + +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__)) +__clsll(uint64_t __t) { + return __builtin_arm_cls64(__t); +} + +/* REV */ +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__rev(uint32_t __t) { + return __builtin_bswap32(__t); +} + +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) +__revl(unsigned long __t) { +#if __SIZEOF_LONG__ == 4 + return __builtin_bswap32(__t); +#else + return __builtin_bswap64(__t); +#endif +} + +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) +__revll(uint64_t __t) { + return __builtin_bswap64(__t); +} + +/* REV16 */ +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__rev16(uint32_t __t) { + return __ror(__rev(__t), 16); +} + +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) +__rev16ll(uint64_t __t) { + return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t); +} + +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) +__rev16l(unsigned long __t) { +#if __SIZEOF_LONG__ == 4 + return __rev16(__t); +#else + return __rev16ll(__t); +#endif +} + +/* REVSH */ +static __inline__ int16_t __attribute__((__always_inline__, __nodebug__)) +__revsh(int16_t __t) { + return (int16_t)__builtin_bswap16((uint16_t)__t); +} + +/* RBIT */ +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__rbit(uint32_t __t) { + return __builtin_arm_rbit(__t); +} + +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) +__rbitll(uint64_t __t) { +#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE + return (((uint64_t)__builtin_arm_rbit(__t)) << 32) | + __builtin_arm_rbit(__t >> 32); +#else + return __builtin_arm_rbit64(__t); +#endif +} + +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) +__rbitl(unsigned long __t) { +#if __SIZEOF_LONG__ == 4 + return __rbit(__t); +#else + return __rbitll(__t); +#endif +} + +/* 8.3 16-bit multiplications */ +#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +__smulbb(int32_t __a, int32_t __b) { + return __builtin_arm_smulbb(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +__smulbt(int32_t __a, int32_t __b) { + return __builtin_arm_smulbt(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +__smultb(int32_t __a, int32_t __b) { + return __builtin_arm_smultb(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +__smultt(int32_t __a, int32_t __b) { + return 
__builtin_arm_smultt(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +__smulwb(int32_t __a, int32_t __b) { + return __builtin_arm_smulwb(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__,__nodebug__)) +__smulwt(int32_t __a, int32_t __b) { + return __builtin_arm_smulwt(__a, __b); +} +#endif + +/* + * 8.4 Saturating intrinsics + * + * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag + * intrinsics are implemented and the flag is enabled. + */ +/* 8.4.1 Width-specified saturation intrinsics */ +#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT +#define __ssat(x, y) __builtin_arm_ssat(x, y) +#define __usat(x, y) __builtin_arm_usat(x, y) +#endif + +/* 8.4.2 Saturating addition and subtraction intrinsics */ +#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__qadd(int32_t __t, int32_t __v) { + return __builtin_arm_qadd(__t, __v); +} + +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__qsub(int32_t __t, int32_t __v) { + return __builtin_arm_qsub(__t, __v); +} + +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__qdbl(int32_t __t) { + return __builtin_arm_qadd(__t, __t); +} +#endif + +/* 8.4.3 Accumulating multiplications */ +#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlabb(int32_t __a, int32_t __b, int32_t __c) { + return __builtin_arm_smlabb(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlabt(int32_t __a, int32_t __b, int32_t __c) { + return __builtin_arm_smlabt(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlatb(int32_t __a, int32_t __b, int32_t __c) { + return __builtin_arm_smlatb(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlatt(int32_t __a, int32_t __b, int32_t __c) { + return __builtin_arm_smlatt(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlawb(int32_t __a, int32_t __b, int32_t __c) { + return __builtin_arm_smlawb(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlawt(int32_t __a, int32_t __b, int32_t __c) { + return __builtin_arm_smlawt(__a, __b, __c); +} +#endif + + +/* 8.5.4 Parallel 16-bit saturation */ +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 +#define __ssat16(x, y) __builtin_arm_ssat16(x, y) +#define __usat16(x, y) __builtin_arm_usat16(x, y) +#endif + +/* 8.5.5 Packing and unpacking */ +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 +typedef int32_t int8x4_t; +typedef int32_t int16x2_t; +typedef uint32_t uint8x4_t; +typedef uint32_t uint16x2_t; + +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__sxtab16(int16x2_t __a, int8x4_t __b) { + return __builtin_arm_sxtab16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__sxtb16(int8x4_t __a) { + return __builtin_arm_sxtb16(__a); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__uxtab16(int16x2_t __a, int8x4_t __b) { + return __builtin_arm_uxtab16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__uxtb16(int8x4_t __a) { + return __builtin_arm_uxtb16(__a); +} +#endif + +/* 8.5.6 Parallel selection */ +#if 
defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__sel(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_sel(__a, __b); +} +#endif + +/* 8.5.7 Parallel 8-bit addition and subtraction */ +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__qadd8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_qadd8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__qsub8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_qsub8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__sadd8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_sadd8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__shadd8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_shadd8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__shsub8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_shsub8(__a, __b); +} +static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__)) +__ssub8(int8x4_t __a, int8x4_t __b) { + return __builtin_arm_ssub8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uadd8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uadd8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uhadd8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uhadd8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uhsub8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uhsub8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uqadd8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uqadd8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__uqsub8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_uqsub8(__a, __b); +} +static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__)) +__usub8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_usub8(__a, __b); +} +#endif + +/* 8.5.8 Sum of 8-bit absolute differences */ +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__usad8(uint8x4_t __a, uint8x4_t __b) { + return __builtin_arm_usad8(__a, __b); +} +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) +__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) { + return __builtin_arm_usada8(__a, __b, __c); +} +#endif + +/* 8.5.9 Parallel 16-bit addition and subtraction */ +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__qadd16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_qadd16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__qasx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_qasx(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__qsax(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_qsax(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__qsub16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_qsub16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, 
__nodebug__)) +__sadd16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_sadd16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__sasx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_sasx(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__shadd16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_shadd16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__shasx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_shasx(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__shsax(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_shsax(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__shsub16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_shsub16(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__ssax(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_ssax(__a, __b); +} +static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__)) +__ssub16(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_ssub16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uadd16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uadd16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uasx(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uasx(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uhadd16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uhadd16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uhasx(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uhasx(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uhsax(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uhsax(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uhsub16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uhsub16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uqadd16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uqadd16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uqasx(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uqasx(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uqsax(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uqsax(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__uqsub16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_uqsub16(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__usax(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_usax(__a, __b); +} +static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__)) +__usub16(uint16x2_t __a, uint16x2_t __b) { + return __builtin_arm_usub16(__a, __b); +} +#endif + +/* 8.5.10 Parallel 16-bit multiplication */ +#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) { + return __builtin_arm_smlad(__a, __b, __c); +} +static __inline__ int32_t 
__attribute__((__always_inline__, __nodebug__)) +__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) { + return __builtin_arm_smladx(__a, __b, __c); +} +static __inline__ int64_t __attribute__((__always_inline__, __nodebug__)) +__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) { + return __builtin_arm_smlald(__a, __b, __c); +} +static __inline__ int64_t __attribute__((__always_inline__, __nodebug__)) +__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) { + return __builtin_arm_smlaldx(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) { + return __builtin_arm_smlsd(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) { + return __builtin_arm_smlsdx(__a, __b, __c); +} +static __inline__ int64_t __attribute__((__always_inline__, __nodebug__)) +__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) { + return __builtin_arm_smlsld(__a, __b, __c); +} +static __inline__ int64_t __attribute__((__always_inline__, __nodebug__)) +__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) { + return __builtin_arm_smlsldx(__a, __b, __c); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smuad(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_smuad(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smuadx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_smuadx(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smusd(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_smusd(__a, __b); +} +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) +__smusdx(int16x2_t __a, int16x2_t __b) { + return __builtin_arm_smusdx(__a, __b); +} +#endif + +/* 8.6 Floating-point data-processing intrinsics */ +#if (defined(__ARM_FEATURE_DIRECTED_ROUNDING) && \ + (__ARM_FEATURE_DIRECTED_ROUNDING)) && \ + (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE) +static __inline__ double __attribute__((__always_inline__, __nodebug__)) +__rintn(double __a) { + return __builtin_roundeven(__a); +} + +static __inline__ float __attribute__((__always_inline__, __nodebug__)) +__rintnf(float __a) { + return __builtin_roundevenf(__a); +} +#endif + +/* 8.8 CRC32 intrinsics */ +#if (defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32) || \ + (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) +__crc32b(uint32_t __a, uint8_t __b) { + return __builtin_arm_crc32b(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) +__crc32h(uint32_t __a, uint16_t __b) { + return __builtin_arm_crc32h(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) +__crc32w(uint32_t __a, uint32_t __b) { + return __builtin_arm_crc32w(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) +__crc32d(uint32_t __a, uint64_t __b) { + return __builtin_arm_crc32d(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) +__crc32cb(uint32_t __a, uint8_t __b) { + return __builtin_arm_crc32cb(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) +__crc32ch(uint32_t __a, uint16_t __b) { + return 
__builtin_arm_crc32ch(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) +__crc32cw(uint32_t __a, uint32_t __b) { + return __builtin_arm_crc32cw(__a, __b); +} + +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc"))) +__crc32cd(uint32_t __a, uint64_t __b) { + return __builtin_arm_crc32cd(__a, __b); +} +#endif + +/* 8.6 Floating-point data-processing intrinsics */ +/* Armv8.3-A Javascript conversion intrinsic */ +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("v8.3a"))) +__jcvt(double __a) { + return __builtin_arm_jcvt(__a); +} +#endif + +/* Armv8.5-A FP rounding intrinsics */ +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint32zf(float __a) { + return __builtin_arm_rint32zf(__a); +} + +static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint32z(double __a) { + return __builtin_arm_rint32z(__a); +} + +static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint64zf(float __a) { + return __builtin_arm_rint64zf(__a); +} + +static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint64z(double __a) { + return __builtin_arm_rint64z(__a); +} + +static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint32xf(float __a) { + return __builtin_arm_rint32xf(__a); +} + +static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint32x(double __a) { + return __builtin_arm_rint32x(__a); +} + +static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint64xf(float __a) { + return __builtin_arm_rint64xf(__a); +} + +static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a"))) +__rint64x(double __a) { + return __builtin_arm_rint64x(__a); +} +#endif + +/* 8.9 Armv8.7-A load/store 64-byte intrinsics */ +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +typedef struct { + uint64_t val[8]; +} data512_t; + +static __inline__ data512_t __attribute__((__always_inline__, __nodebug__, target("ls64"))) +__arm_ld64b(const void *__addr) { + data512_t __value; + __builtin_arm_ld64b(__addr, __value.val); + return __value; +} +static __inline__ void __attribute__((__always_inline__, __nodebug__, target("ls64"))) +__arm_st64b(void *__addr, data512_t __value) { + __builtin_arm_st64b(__addr, __value.val); +} +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64"))) +__arm_st64bv(void *__addr, data512_t __value) { + return __builtin_arm_st64bv(__addr, __value.val); +} +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64"))) +__arm_st64bv0(void *__addr, data512_t __value) { + return __builtin_arm_st64bv0(__addr, __value.val); +} +#endif + +/* 11.1 Special register intrinsics */ +#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg) +#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg) +#define __arm_rsr128(sysreg) __builtin_arm_rsr128(sysreg) +#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg) +#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg)) +#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg)) +#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v) +#define __arm_wsr64(sysreg, v) 
__builtin_arm_wsr64(sysreg, v) +#define __arm_wsr128(sysreg, v) __builtin_arm_wsr128(sysreg, v) +#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v) +#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v)) +#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v)) + +/* 10.3 MTE intrinsics */ +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask) +#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset) +#define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded) +#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr) +#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr) +#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb) + +/* 18 memcpy family of operations intrinsics - MOPS */ +#define __arm_mops_memset_tag(__tagged_address, __value, __size) \ + __builtin_arm_mops_memset_tag(__tagged_address, __value, __size) +#endif + +/* 11.3 Coprocessor Intrinsics */ +#if defined(__ARM_FEATURE_COPROC) + +#if (__ARM_FEATURE_COPROC & 0x1) + +#if (__ARM_ARCH < 8) +#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \ + __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) +#endif /* __ARM_ARCH < 8 */ + +#define __arm_ldc(coproc, CRd, p) __builtin_arm_ldc(coproc, CRd, p) +#define __arm_stc(coproc, CRd, p) __builtin_arm_stc(coproc, CRd, p) + +#define __arm_mcr(coproc, opc1, value, CRn, CRm, opc2) \ + __builtin_arm_mcr(coproc, opc1, value, CRn, CRm, opc2) +#define __arm_mrc(coproc, opc1, CRn, CRm, opc2) \ + __builtin_arm_mrc(coproc, opc1, CRn, CRm, opc2) + +#if (__ARM_ARCH != 4) && (__ARM_ARCH < 8) +#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p) +#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p) +#endif /* (__ARM_ARCH != 4) && (__ARM_ARCH != 8) */ + +#if (__ARM_ARCH_8M_MAIN__) || (__ARM_ARCH_8_1M_MAIN__) +#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \ + __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) +#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p) +#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p) +#endif /* ___ARM_ARCH_8M_MAIN__ */ + +#endif /* __ARM_FEATURE_COPROC & 0x1 */ + +#if (__ARM_FEATURE_COPROC & 0x2) +#define __arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2) \ + __builtin_arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2) +#define __arm_ldc2(coproc, CRd, p) __builtin_arm_ldc2(coproc, CRd, p) +#define __arm_stc2(coproc, CRd, p) __builtin_arm_stc2(coproc, CRd, p) +#define __arm_ldc2l(coproc, CRd, p) __builtin_arm_ldc2l(coproc, CRd, p) +#define __arm_stc2l(coproc, CRd, p) __builtin_arm_stc2l(coproc, CRd, p) +#define __arm_mcr2(coproc, opc1, value, CRn, CRm, opc2) \ + __builtin_arm_mcr2(coproc, opc1, value, CRn, CRm, opc2) +#define __arm_mrc2(coproc, opc1, CRn, CRm, opc2) \ + __builtin_arm_mrc2(coproc, opc1, CRn, CRm, opc2) +#endif + +#if (__ARM_FEATURE_COPROC & 0x4) +#define __arm_mcrr(coproc, opc1, value, CRm) \ + __builtin_arm_mcrr(coproc, opc1, value, CRm) +#define __arm_mrrc(coproc, opc1, CRm) __builtin_arm_mrrc(coproc, opc1, CRm) +#endif + +#if (__ARM_FEATURE_COPROC & 0x8) +#define __arm_mcrr2(coproc, opc1, value, CRm) \ + __builtin_arm_mcrr2(coproc, opc1, value, CRm) +#define __arm_mrrc2(coproc, opc1, CRm) __builtin_arm_mrrc2(coproc, opc1, CRm) +#endif + +#endif // __ARM_FEATURE_COPROC + +/* 17 Transactional Memory Extension (TME) Intrinsics */ +#if 
defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME + +#define _TMFAILURE_REASON 0x00007fffu +#define _TMFAILURE_RTRY 0x00008000u +#define _TMFAILURE_CNCL 0x00010000u +#define _TMFAILURE_MEM 0x00020000u +#define _TMFAILURE_IMP 0x00040000u +#define _TMFAILURE_ERR 0x00080000u +#define _TMFAILURE_SIZE 0x00100000u +#define _TMFAILURE_NEST 0x00200000u +#define _TMFAILURE_DBG 0x00400000u +#define _TMFAILURE_INT 0x00800000u +#define _TMFAILURE_TRIVIAL 0x01000000u + +#define __tstart() __builtin_arm_tstart() +#define __tcommit() __builtin_arm_tcommit() +#define __tcancel(__arg) __builtin_arm_tcancel(__arg) +#define __ttest() __builtin_arm_ttest() + +#endif /* __ARM_FEATURE_TME */ + +/* 8.7 Armv8.5-A Random number generation intrinsics */ +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand"))) +__rndr(uint64_t *__p) { + return __builtin_arm_rndr(__p); +} +static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand"))) +__rndrrs(uint64_t *__p) { + return __builtin_arm_rndrrs(__p); +} +#endif + +/* 11.2 Guarded Control Stack intrinsics */ +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +static __inline__ void * __attribute__((__always_inline__, __nodebug__)) +__gcspr() { + return (void *)__builtin_arm_rsr64("gcspr_el0"); +} + +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("gcs"))) +__gcspopm() { + return __builtin_arm_gcspopm(0); +} + +static __inline__ const void * __attribute__((__always_inline__, __nodebug__, target("gcs"))) +__gcsss(const void *__stack) { + return __builtin_arm_gcsss(__stack); +} +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /* __ARM_ACLE_H */ diff --git a/third_party/aarch64/clang/arm_bf16.h b/third_party/aarch64/clang/arm_bf16.h new file mode 100644 index 000000000..329ae39e6 --- /dev/null +++ b/third_party/aarch64/clang/arm_bf16.h @@ -0,0 +1,20 @@ +/*===---- arm_bf16.h - ARM BF16 intrinsics -----------------------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_BF16_H +#define __ARM_BF16_H + +typedef __bf16 bfloat16_t; +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) + + +#undef __ai + +#endif diff --git a/third_party/aarch64/clang/arm_cde.h b/third_party/aarch64/clang/arm_cde.h new file mode 100644 index 000000000..4ad5d825d --- /dev/null +++ b/third_party/aarch64/clang/arm_cde.h @@ -0,0 +1,410 @@ +/*===---- arm_cde.h - ARM CDE intrinsics -----------------------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_CDE_H +#define __ARM_CDE_H + +#if !__ARM_FEATURE_CDE +#error "CDE support not enabled" +#endif + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1))) +uint32_t __arm_cx1(int, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1a))) +uint32_t __arm_cx1a(int, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1d))) +uint64_t __arm_cx1d(int, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1da))) +uint64_t __arm_cx1da(int, uint64_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2))) +uint32_t __arm_cx2(int, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2a))) +uint32_t __arm_cx2a(int, uint32_t, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2d))) +uint64_t __arm_cx2d(int, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2da))) +uint64_t __arm_cx2da(int, uint64_t, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3))) +uint32_t __arm_cx3(int, uint32_t, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3a))) +uint32_t __arm_cx3a(int, uint32_t, uint32_t, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3d))) +uint64_t __arm_cx3d(int, uint32_t, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3da))) +uint64_t __arm_cx3da(int, uint64_t, uint32_t, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1_u32))) +uint32_t __arm_vcx1_u32(int, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1a_u32))) +uint32_t __arm_vcx1a_u32(int, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1d_u64))) +uint64_t __arm_vcx1d_u64(int, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1da_u64))) +uint64_t __arm_vcx1da_u64(int, uint64_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2_u32))) +uint32_t __arm_vcx2_u32(int, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2a_u32))) +uint32_t __arm_vcx2a_u32(int, uint32_t, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2d_u64))) +uint64_t __arm_vcx2d_u64(int, uint64_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2da_u64))) +uint64_t __arm_vcx2da_u64(int, uint64_t, uint64_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3_u32))) +uint32_t __arm_vcx3_u32(int, uint32_t, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3a_u32))) +uint32_t __arm_vcx3a_u32(int, uint32_t, uint32_t, uint32_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3d_u64))) +uint64_t 
__arm_vcx3d_u64(int, uint64_t, uint64_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3da_u64))) +uint64_t __arm_vcx3da_u64(int, uint64_t, uint64_t, uint64_t, uint32_t); + +#if __ARM_FEATURE_MVE + +typedef uint16_t mve_pred16_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t; +typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t; +typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t; +typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t; +typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t; + +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s16))) +int16x8_t __arm_vcx1q_m(int, int16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s32))) +int32x4_t __arm_vcx1q_m(int, int32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s64))) +int64x2_t __arm_vcx1q_m(int, int64x2_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s8))) +int8x16_t __arm_vcx1q_m(int, int8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u16))) +uint16x8_t __arm_vcx1q_m(int, uint16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u32))) +uint32x4_t __arm_vcx1q_m(int, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u64))) +uint64x2_t __arm_vcx1q_m(int, uint64x2_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u8))) +uint8x16_t __arm_vcx1q_m(int, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_u8))) +uint8x16_t __arm_vcx1q_u8(int, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s16))) +int16x8_t __arm_vcx1qa_m(int, int16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s32))) +int32x4_t __arm_vcx1qa_m(int, int32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s64))) +int64x2_t __arm_vcx1qa_m(int, int64x2_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s8))) +int8x16_t __arm_vcx1qa_m(int, int8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u16))) +uint16x8_t __arm_vcx1qa_m(int, 
uint16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u32))) +uint32x4_t __arm_vcx1qa_m(int, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u64))) +uint64x2_t __arm_vcx1qa_m(int, uint64x2_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u8))) +uint8x16_t __arm_vcx1qa_m(int, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s16))) +int16x8_t __arm_vcx1qa(int, int16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s32))) +int32x4_t __arm_vcx1qa(int, int32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s64))) +int64x2_t __arm_vcx1qa(int, int64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s8))) +int8x16_t __arm_vcx1qa(int, int8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u16))) +uint16x8_t __arm_vcx1qa(int, uint16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u32))) +uint32x4_t __arm_vcx1qa(int, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u64))) +uint64x2_t __arm_vcx1qa(int, uint64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u8))) +uint8x16_t __arm_vcx1qa(int, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s16))) +int16x8_t __arm_vcx2q_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s32))) +int32x4_t __arm_vcx2q_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s64))) +int64x2_t __arm_vcx2q_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s8))) +int8x16_t __arm_vcx2q_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u16))) +uint16x8_t __arm_vcx2q_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u32))) +uint32x4_t __arm_vcx2q_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u64))) +uint64x2_t __arm_vcx2q_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u8))) +uint8x16_t __arm_vcx2q_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s16))) +int16x8_t __arm_vcx2q(int, int16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s32))) +int32x4_t __arm_vcx2q(int, int32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s64))) +int64x2_t __arm_vcx2q(int, int64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s8))) +int8x16_t __arm_vcx2q(int, int8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u16))) +uint16x8_t __arm_vcx2q(int, uint16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u32))) +uint32x4_t __arm_vcx2q(int, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u64))) +uint64x2_t __arm_vcx2q(int, uint64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8))) +uint8x16_t __arm_vcx2q(int, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s16))) +uint8x16_t __arm_vcx2q_u8(int, int16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s32))) +uint8x16_t __arm_vcx2q_u8(int, int32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s64))) +uint8x16_t __arm_vcx2q_u8(int, int64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s8))) +uint8x16_t __arm_vcx2q_u8(int, int8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u16))) +uint8x16_t __arm_vcx2q_u8(int, uint16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u32))) +uint8x16_t __arm_vcx2q_u8(int, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u64))) +uint8x16_t __arm_vcx2q_u8(int, uint64x2_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u8))) +uint8x16_t __arm_vcx2q_u8(int, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s16))) +int16x8_t __arm_vcx2qa_impl(int, int16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s32))) +int32x4_t __arm_vcx2qa_impl(int, int32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s64))) +int64x2_t __arm_vcx2qa_impl(int, int64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s8))) +int8x16_t __arm_vcx2qa_impl(int, int8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u16))) +uint16x8_t __arm_vcx2qa_impl(int, uint16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u32))) +uint32x4_t __arm_vcx2qa_impl(int, uint32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u64))) +uint64x2_t __arm_vcx2qa_impl(int, uint64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u8))) +uint8x16_t __arm_vcx2qa_impl(int, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s16))) +int16x8_t __arm_vcx2qa_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s32))) +int32x4_t __arm_vcx2qa_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s64))) +int64x2_t __arm_vcx2qa_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s8))) +int8x16_t __arm_vcx2qa_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u16))) +uint16x8_t __arm_vcx2qa_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u32))) +uint32x4_t __arm_vcx2qa_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u64))) +uint64x2_t __arm_vcx2qa_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u8))) +uint8x16_t __arm_vcx2qa_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s16))) +int16x8_t __arm_vcx3q_impl(int, int16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s32))) +int32x4_t __arm_vcx3q_impl(int, int32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s64))) +int64x2_t __arm_vcx3q_impl(int, int64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s8))) +int8x16_t __arm_vcx3q_impl(int, int8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u16))) +uint16x8_t __arm_vcx3q_impl(int, uint16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u32))) +uint32x4_t __arm_vcx3q_impl(int, uint32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u64))) +uint64x2_t __arm_vcx3q_impl(int, uint64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u8))) +uint8x16_t __arm_vcx3q_impl(int, uint8x16_t, 
uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s16))) +int16x8_t __arm_vcx3q_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s32))) +int32x4_t __arm_vcx3q_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s64))) +int64x2_t __arm_vcx3q_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s8))) +int8x16_t __arm_vcx3q_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u16))) +uint16x8_t __arm_vcx3q_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u32))) +uint32x4_t __arm_vcx3q_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u64))) +uint64x2_t __arm_vcx3q_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u8))) +uint8x16_t __arm_vcx3q_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s16))) +uint8x16_t __arm_vcx3q_u8_impl(int, int16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s32))) +uint8x16_t __arm_vcx3q_u8_impl(int, int32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s64))) +uint8x16_t __arm_vcx3q_u8_impl(int, int64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s8))) +uint8x16_t __arm_vcx3q_u8_impl(int, int8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u16))) +uint8x16_t __arm_vcx3q_u8_impl(int, uint16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u32))) +uint8x16_t __arm_vcx3q_u8_impl(int, uint32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u64))) +uint8x16_t __arm_vcx3q_u8_impl(int, uint64x2_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u8))) +uint8x16_t __arm_vcx3q_u8_impl(int, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s16))) +int16x8_t __arm_vcx3qa_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s32))) +int32x4_t 
__arm_vcx3qa_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s64))) +int64x2_t __arm_vcx3qa_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s8))) +int8x16_t __arm_vcx3qa_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u16))) +uint16x8_t __arm_vcx3qa_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u32))) +uint32x4_t __arm_vcx3qa_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u64))) +uint64x2_t __arm_vcx3qa_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u8))) +uint8x16_t __arm_vcx3qa_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s16))) +int16x8_t __arm_vcx3qa_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s32))) +int32x4_t __arm_vcx3qa_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s64))) +int64x2_t __arm_vcx3qa_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s8))) +int8x16_t __arm_vcx3qa_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u16))) +uint16x8_t __arm_vcx3qa_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u32))) +uint32x4_t __arm_vcx3qa_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u64))) +uint64x2_t __arm_vcx3qa_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u8))) +uint8x16_t __arm_vcx3qa_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8))) +int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8))) +int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8))) +int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8))) +int8x16_t 
__arm_vreinterpretq_s8_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8))) +uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8))) +uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8))) +uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16))) +uint8x16_t __arm_vreinterpretq_u8(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32))) +uint8x16_t __arm_vreinterpretq_u8(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64))) +uint8x16_t __arm_vreinterpretq_u8(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8))) +uint8x16_t __arm_vreinterpretq_u8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16))) +uint8x16_t __arm_vreinterpretq_u8(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32))) +uint8x16_t __arm_vreinterpretq_u8(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64))) +uint8x16_t __arm_vreinterpretq_u8(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vreinterpretq_u8_u8))) +uint8x16_t __arm_vreinterpretq_u8(uint8x16_t); +#define __arm_vcx2q_m(cp, inactive, n, imm, pred) __arm_vcx2q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), (imm), (pred)) +#define __arm_vcx2qa(cp, acc, n, imm) __arm_vcx2qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm)) +#define __arm_vcx2qa_m(cp, acc, n, imm, pred) __arm_vcx2qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm), (pred)) +#define __arm_vcx3q(cp, n, m, imm) __arm_vcx3q_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm)) +#define __arm_vcx3q_m(cp, inactive, n, m, imm, pred) __arm_vcx3q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred)) +#define __arm_vcx3q_u8(cp, n, m, imm) __arm_vcx3q_u8_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm)) +#define __arm_vcx3qa(cp, acc, n, m, imm) __arm_vcx3qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm)) +#define __arm_vcx3qa_m(cp, acc, n, m, imm, pred) __arm_vcx3qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred)) + +#endif /* __ARM_FEATURE_MVE */ + +#if __ARM_FEATURE_MVE & 2 + +typedef __fp16 float16_t; +typedef float float32_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t; + +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f16))) +float16x8_t __arm_vcx1q_m(int, float16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f32))) +float32x4_t __arm_vcx1q_m(int, float32x4_t, uint32_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f16))) +float16x8_t __arm_vcx1qa(int, float16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f32))) +float32x4_t __arm_vcx1qa(int, float32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f16))) +float16x8_t __arm_vcx1qa_m(int, float16x8_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f32))) +float32x4_t __arm_vcx1qa_m(int, float32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f16))) +float16x8_t __arm_vcx2q(int, float16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f32))) +float32x4_t __arm_vcx2q(int, float32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f16))) +float16x8_t __arm_vcx2q_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f32))) +float32x4_t __arm_vcx2q_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f16))) +uint8x16_t __arm_vcx2q_u8(int, float16x8_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f32))) +uint8x16_t __arm_vcx2q_u8(int, float32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f16))) +float16x8_t __arm_vcx2qa_impl(int, float16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f32))) +float32x4_t __arm_vcx2qa_impl(int, float32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f16))) +float16x8_t __arm_vcx2qa_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f32))) +float32x4_t __arm_vcx2qa_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f16))) +float16x8_t __arm_vcx3q_impl(int, float16x8_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f32))) +float32x4_t __arm_vcx3q_impl(int, float32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f16))) +float16x8_t __arm_vcx3q_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f32))) +float32x4_t __arm_vcx3q_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f16))) +uint8x16_t __arm_vcx3q_u8_impl(int, float16x8_t, uint8x16_t, uint32_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f32))) +uint8x16_t __arm_vcx3q_u8_impl(int, float32x4_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f16))) +float16x8_t __arm_vcx3qa_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f32))) +float32x4_t __arm_vcx3qa_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f16))) +float16x8_t __arm_vcx3qa_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f32))) +float32x4_t __arm_vcx3qa_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t __arm_vreinterpretq_u8(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t __arm_vreinterpretq_u8(float32x4_t); + +#endif /* __ARM_FEATURE_MVE & 2 */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __ARM_CDE_H */ diff --git a/third_party/aarch64/clang/arm_cmse.h b/third_party/aarch64/clang/arm_cmse.h new file mode 100644 index 000000000..ecf50ecc5 --- /dev/null +++ b/third_party/aarch64/clang/arm_cmse.h @@ -0,0 +1,217 @@ +//===---- arm_cmse.h - Arm CMSE support -----------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef __ARM_CMSE_H +#define __ARM_CMSE_H + +#if (__ARM_FEATURE_CMSE & 0x1) +#include +#include + +#define __ARM_CMSE_SECURE_MODE (__ARM_FEATURE_CMSE & 0x2) +#define CMSE_MPU_READWRITE 1 /* checks if readwrite_ok field is set */ +#define CMSE_AU_NONSECURE 2 /* checks if permissions have secure field unset */ +#define CMSE_MPU_UNPRIV 4 /* sets T flag on TT instruction */ +#define CMSE_MPU_READ 8 /* checks if read_ok field is set */ +#define CMSE_MPU_NONSECURE 16 /* sets A flag, checks if secure field unset */ +#define CMSE_NONSECURE (CMSE_AU_NONSECURE | CMSE_MPU_NONSECURE) + +#define cmse_check_pointed_object(p, f) \ + cmse_check_address_range((p), sizeof(*(p)), (f)) + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef union { + struct cmse_address_info { +#ifdef __ARM_BIG_ENDIAN + /* __ARM_BIG_ENDIAN */ +#if (__ARM_CMSE_SECURE_MODE) + unsigned idau_region : 8; + unsigned idau_region_valid : 1; + unsigned secure : 1; + unsigned nonsecure_readwrite_ok : 1; + unsigned nonsecure_read_ok : 1; +#else + unsigned : 12; +#endif + unsigned readwrite_ok : 1; + unsigned read_ok : 1; +#if (__ARM_CMSE_SECURE_MODE) + unsigned sau_region_valid : 1; +#else + unsigned : 1; +#endif + unsigned mpu_region_valid : 1; +#if (__ARM_CMSE_SECURE_MODE) + unsigned sau_region : 8; +#else + unsigned : 8; +#endif + unsigned mpu_region : 8; + +#else /* __ARM_LITTLE_ENDIAN */ + unsigned mpu_region : 8; +#if (__ARM_CMSE_SECURE_MODE) + unsigned sau_region : 8; +#else + unsigned : 8; +#endif + unsigned mpu_region_valid : 1; +#if (__ARM_CMSE_SECURE_MODE) + unsigned sau_region_valid : 1; +#else + unsigned : 1; +#endif + unsigned read_ok : 1; + unsigned readwrite_ok : 1; +#if (__ARM_CMSE_SECURE_MODE) + unsigned nonsecure_read_ok : 1; + unsigned nonsecure_readwrite_ok : 1; + unsigned secure : 1; + unsigned idau_region_valid : 1; + unsigned idau_region : 8; +#else + unsigned : 12; +#endif +#endif /*__ARM_LITTLE_ENDIAN */ + } flags; + unsigned value; +} cmse_address_info_t; + +static cmse_address_info_t __attribute__((__always_inline__, __nodebug__)) +cmse_TT(void *__p) { + cmse_address_info_t __u; + __u.value = __builtin_arm_cmse_TT(__p); + return __u; +} +static cmse_address_info_t __attribute__((__always_inline__, __nodebug__)) +cmse_TTT(void *__p) { + cmse_address_info_t __u; + __u.value = __builtin_arm_cmse_TTT(__p); + return __u; +} + +#if __ARM_CMSE_SECURE_MODE +static cmse_address_info_t __attribute__((__always_inline__, __nodebug__)) +cmse_TTA(void *__p) { + cmse_address_info_t __u; + __u.value = __builtin_arm_cmse_TTA(__p); + return __u; +} +static cmse_address_info_t __attribute__((__always_inline__, __nodebug__)) +cmse_TTAT(void *__p) { + cmse_address_info_t __u; + __u.value = __builtin_arm_cmse_TTAT(__p); + return __u; +} +#endif + +#define cmse_TT_fptr(p) cmse_TT(__builtin_bit_cast(void *, (p))) +#define cmse_TTT_fptr(p) cmse_TTT(__builtin_bit_cast(void *, (p))) + +#if __ARM_CMSE_SECURE_MODE +#define cmse_TTA_fptr(p) cmse_TTA(__builtin_bit_cast(void *, (p))) +#define cmse_TTAT_fptr(p) cmse_TTAT(__builtin_bit_cast(void *, (p))) +#endif + +static void *__attribute__((__always_inline__)) +cmse_check_address_range(void *__pb, size_t __s, int __flags) { + uintptr_t __begin = (uintptr_t)__pb; + uintptr_t __end = __begin + __s - 1; + + if (__end < __begin) + return NULL; /* wrap around check */ + + /* Check whether the range crosses a 32-byte aligned address */ + const 
int __single_check = (__begin ^ __end) < 0x20u; + + /* execute the right variant of the TT instructions */ + void *__pe = (void *)__end; + cmse_address_info_t __permb, __perme; + switch (__flags & (CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE)) { + case 0: + __permb = cmse_TT(__pb); + __perme = __single_check ? __permb : cmse_TT(__pe); + break; + case CMSE_MPU_UNPRIV: + __permb = cmse_TTT(__pb); + __perme = __single_check ? __permb : cmse_TTT(__pe); + break; +#if __ARM_CMSE_SECURE_MODE + case CMSE_MPU_NONSECURE: + __permb = cmse_TTA(__pb); + __perme = __single_check ? __permb : cmse_TTA(__pe); + break; + case CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE: + __permb = cmse_TTAT(__pb); + __perme = __single_check ? __permb : cmse_TTAT(__pe); + break; +#endif + /* if CMSE_NONSECURE is specified w/o __ARM_CMSE_SECURE_MODE */ + default: + return NULL; + } + + /* check that the range does not cross MPU, SAU, or IDAU region boundaries */ + if (__permb.value != __perme.value) + return NULL; +#if !(__ARM_CMSE_SECURE_MODE) + /* CMSE_AU_NONSECURE is only supported when __ARM_FEATURE_CMSE & 0x2 */ + if (__flags & CMSE_AU_NONSECURE) + return NULL; +#endif + + /* check the permission on the range */ + switch (__flags & ~(CMSE_MPU_UNPRIV | CMSE_MPU_NONSECURE)) { +#if (__ARM_CMSE_SECURE_MODE) + case CMSE_MPU_READ | CMSE_MPU_READWRITE | CMSE_AU_NONSECURE: + case CMSE_MPU_READWRITE | CMSE_AU_NONSECURE: + return __permb.flags.nonsecure_readwrite_ok ? __pb : NULL; + + case CMSE_MPU_READ | CMSE_AU_NONSECURE: + return __permb.flags.nonsecure_read_ok ? __pb : NULL; + + case CMSE_AU_NONSECURE: + return __permb.flags.secure ? NULL : __pb; +#endif + case CMSE_MPU_READ | CMSE_MPU_READWRITE: + case CMSE_MPU_READWRITE: + return __permb.flags.readwrite_ok ? __pb : NULL; + + case CMSE_MPU_READ: + return __permb.flags.read_ok ? __pb : NULL; + + default: + return NULL; + } +} + +#if __ARM_CMSE_SECURE_MODE +static int __attribute__((__always_inline__, __nodebug__)) +cmse_nonsecure_caller(void) { + return !((uintptr_t)__builtin_return_address(0) & 1); +} + +#define cmse_nsfptr_create(p) \ + __builtin_bit_cast(__typeof__(p), \ + (__builtin_bit_cast(uintptr_t, p) & ~(uintptr_t)1)) + +#define cmse_is_nsfptr(p) ((__builtin_bit_cast(uintptr_t, p) & 1) == 0) + +#endif /* __ARM_CMSE_SECURE_MODE */ + +void __attribute__((__noreturn__)) cmse_abort(void); +#if defined(__cplusplus) +} +#endif + +#endif /* (__ARM_FEATURE_CMSE & 0x1) */ + +#endif /* __ARM_CMSE_H */ diff --git a/third_party/aarch64/clang/arm_fp16.h b/third_party/aarch64/clang/arm_fp16.h new file mode 100644 index 000000000..2dd0653ab --- /dev/null +++ b/third_party/aarch64/clang/arm_fp16.h @@ -0,0 +1,596 @@ +/*===---- arm_fp16.h - ARM FP16 intrinsics ---------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_FP16_H +#define __ARM_FP16_H + +#include + +typedef __fp16 float16_t; +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) + +#if defined(__aarch64__) || defined(__arm64ec__) +#define vabdh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \ + __ret; \ +}) +#define vabsh_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \ + __ret; \ +}) +#define vaddh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \ + __ret; \ +}) +#define vcageh_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \ + __ret; \ +}) +#define vcagth_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \ + __ret; \ +}) +#define vcaleh_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \ + __ret; \ +}) +#define vcalth_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \ + __ret; \ +}) +#define vceqh_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \ + __ret; \ +}) +#define vceqzh_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \ + __ret; \ +}) +#define vcgeh_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \ + __ret; \ +}) +#define vcgezh_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \ + __ret; \ +}) +#define vcgth_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \ + __ret; \ +}) +#define vcgtzh_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \ + __ret; \ +}) +#define vcleh_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \ + __ret; \ +}) +#define vclezh_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \ + __ret; \ +}) +#define vclth_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = 
(uint16_t) __builtin_neon_vclth_f16(__s0, __s1); \ + __ret; \ +}) +#define vcltzh_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \ + __ret; \ +}) +#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_s16_f16(__p0) __extension__ ({ \ + int16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \ + __ret; \ +}) +#define vcvth_s32_f16(__p0) __extension__ ({ \ + int32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \ + __ret; \ +}) +#define vcvth_s64_f16(__p0) __extension__ ({ \ + int64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vcvth_s64_f16(__s0); \ + __ret; \ +}) +#define vcvth_u16_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \ + __ret; \ +}) +#define vcvth_u32_f16(__p0) __extension__ ({ \ + uint32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \ + __ret; \ +}) +#define vcvth_u64_f16(__p0) __extension__ ({ \ + uint64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \ + __ret; \ +}) +#define vcvtah_s16_f16(__p0) __extension__ ({ \ + int16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \ + __ret; \ +}) +#define vcvtah_s32_f16(__p0) __extension__ ({ \ + int32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \ + __ret; \ +}) +#define vcvtah_s64_f16(__p0) __extension__ ({ \ + int64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \ + __ret; \ +}) +#define vcvtah_u16_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \ + __ret; \ +}) +#define vcvtah_u32_f16(__p0) __extension__ ({ \ + uint32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \ + __ret; \ +}) +#define vcvtah_u64_f16(__p0) __extension__ ({ \ + uint64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \ + __ret; \ +}) +#define vcvth_f16_u16(__p0) __extension__ ({ \ + float16_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__s0); \ + __ret; \ +}) +#define 
vcvth_f16_s16(__p0) __extension__ ({ \ + float16_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__s0); \ + __ret; \ +}) +#define vcvth_f16_u32(__p0) __extension__ ({ \ + float16_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__s0); \ + __ret; \ +}) +#define vcvth_f16_s32(__p0) __extension__ ({ \ + float16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__s0); \ + __ret; \ +}) +#define vcvth_f16_u64(__p0) __extension__ ({ \ + float16_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__s0); \ + __ret; \ +}) +#define vcvth_f16_s64(__p0) __extension__ ({ \ + float16_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__s0); \ + __ret; \ +}) +#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \ + __ret; \ +}) +#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \ + __ret; \ +}) +#define vcvtmh_s16_f16(__p0) __extension__ ({ \ + int16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_s32_f16(__p0) __extension__ ({ \ + int32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_s64_f16(__p0) __extension__ ({ \ + int64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_u16_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_u32_f16(__p0) __extension__ ({ \ + uint32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \ + __ret; \ +}) +#define vcvtmh_u64_f16(__p0) __extension__ ({ \ + uint64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_s16_f16(__p0) __extension__ ({ \ + int16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_s32_f16(__p0) __extension__ ({ \ + int32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_s64_f16(__p0) __extension__ ({ \ + int64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_u16_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + 
float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_u32_f16(__p0) __extension__ ({ \ + uint32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \ + __ret; \ +}) +#define vcvtnh_u64_f16(__p0) __extension__ ({ \ + uint64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \ + __ret; \ +}) +#define vcvtph_s16_f16(__p0) __extension__ ({ \ + int16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \ + __ret; \ +}) +#define vcvtph_s32_f16(__p0) __extension__ ({ \ + int32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \ + __ret; \ +}) +#define vcvtph_s64_f16(__p0) __extension__ ({ \ + int64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \ + __ret; \ +}) +#define vcvtph_u16_f16(__p0) __extension__ ({ \ + uint16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \ + __ret; \ +}) +#define vcvtph_u32_f16(__p0) __extension__ ({ \ + uint32_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \ + __ret; \ +}) +#define vcvtph_u64_f16(__p0) __extension__ ({ \ + uint64_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \ + __ret; \ +}) +#define vdivh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \ + __ret; \ +}) +#define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \ + __ret; \ +}) +#define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \ + __ret; \ +}) +#define vmaxh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \ + __ret; \ +}) +#define vmaxnmh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \ + __ret; \ +}) +#define vminh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \ + __ret; \ +}) +#define vminnmh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \ + __ret; \ +}) +#define vmulh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \ + __ret; \ +}) +#define vmulxh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_f16(__s0, __s1); \ + __ret; \ +}) +#define vnegh_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vnegh_f16(__s0); \ + __ret; \ +}) +#define 
vrecpeh_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \ + __ret; \ +}) +#define vrecpsh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \ + __ret; \ +}) +#define vrecpxh_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \ + __ret; \ +}) +#define vrndh_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \ + __ret; \ +}) +#define vrndah_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \ + __ret; \ +}) +#define vrndih_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \ + __ret; \ +}) +#define vrndmh_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \ + __ret; \ +}) +#define vrndnh_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \ + __ret; \ +}) +#define vrndph_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \ + __ret; \ +}) +#define vrndxh_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \ + __ret; \ +}) +#define vrsqrteh_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \ + __ret; \ +}) +#define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \ + __ret; \ +}) +#define vsqrth_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \ + __ret; \ +}) +#define vsubh_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \ + __ret; \ +}) +#endif + +#undef __ai + +#endif /* __ARM_FP16_H */ diff --git a/third_party/aarch64/clang/arm_mve.h b/third_party/aarch64/clang/arm_mve.h new file mode 100644 index 000000000..4da41dc3c --- /dev/null +++ b/third_party/aarch64/clang/arm_mve.h @@ -0,0 +1,19187 @@ +/*===---- arm_mve.h - ARM MVE intrinsics -----------------------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===------------------------------------------------------------------------=== + */ + +#ifndef __ARM_MVE_H +#define __ARM_MVE_H + +#if !__ARM_FEATURE_MVE +#error "MVE support not enabled" +#endif + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +typedef uint16_t mve_pred16_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t; +typedef struct { int16x8_t val[2]; } int16x8x2_t; +typedef struct { int16x8_t val[4]; } int16x8x4_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t; +typedef struct { int32x4_t val[2]; } int32x4x2_t; +typedef struct { int32x4_t val[4]; } int32x4x4_t; +typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t; +typedef struct { int64x2_t val[2]; } int64x2x2_t; +typedef struct { int64x2_t val[4]; } int64x2x4_t; +typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t; +typedef struct { int8x16_t val[2]; } int8x16x2_t; +typedef struct { int8x16_t val[4]; } int8x16x4_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t; +typedef struct { uint16x8_t val[2]; } uint16x8x2_t; +typedef struct { uint16x8_t val[4]; } uint16x8x4_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t; +typedef struct { uint32x4_t val[2]; } uint32x4x2_t; +typedef struct { uint32x4_t val[4]; } uint32x4x4_t; +typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t; +typedef struct { uint64x2_t val[2]; } uint64x2x2_t; +typedef struct { uint64x2_t val[4]; } uint64x2x4_t; +typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t; +typedef struct { uint8x16_t val[2]; } uint8x16x2_t; +typedef struct { uint8x16_t val[4]; } uint8x16x4_t; + +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl))) +int64_t __arm_asrl(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll))) +uint64_t __arm_lsll(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr))) +int32_t __arm_sqrshr(int32_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl))) +int64_t __arm_sqrshrl(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48))) +int64_t __arm_sqrshrl_sat48(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl))) +int32_t __arm_sqshl(int32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll))) +int64_t __arm_sqshll(int64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr))) +int32_t __arm_srshr(int32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl))) +int64_t __arm_srshrl(int64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl))) +uint32_t __arm_uqrshl(uint32_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll))) +uint64_t __arm_uqrshll(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48)))
+uint64_t __arm_uqrshll_sat48(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl))) +uint32_t __arm_uqshl(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll))) +uint64_t __arm_uqshll(uint64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr))) +uint32_t __arm_urshr(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl))) +uint64_t __arm_urshrl(uint64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16))) +uint32_t __arm_vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16))) +uint32_t __arm_vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32))) +uint32_t __arm_vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32))) +uint32_t __arm_vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8))) +uint32_t __arm_vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8))) +uint32_t __arm_vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16))) +uint32_t __arm_vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16))) +uint32_t __arm_vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32))) +uint32_t __arm_vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32))) +uint32_t __arm_vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8))) +uint32_t __arm_vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8))) +uint32_t __arm_vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16))) +uint32_t __arm_vabavq_s16(uint32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16))) +uint32_t __arm_vabavq(uint32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32))) +uint32_t __arm_vabavq_s32(uint32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32))) +uint32_t __arm_vabavq(uint32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8))) +uint32_t __arm_vabavq_s8(uint32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8))) +uint32_t __arm_vabavq(uint32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16))) +uint32_t __arm_vabavq_u16(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16))) +uint32_t __arm_vabavq(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32))) +uint32_t __arm_vabavq_u32(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32))) +uint32_t __arm_vabavq(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8))) +uint32_t __arm_vabavq_u8(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8))) +uint32_t __arm_vabavq(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16))) +int16x8_t __arm_vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16))) +int16x8_t __arm_vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32))) +int32x4_t __arm_vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32))) +int32x4_t __arm_vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8))) +int8x16_t __arm_vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8))) +int8x16_t __arm_vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16))) +uint16x8_t __arm_vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16))) +uint16x8_t __arm_vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32))) +uint32x4_t __arm_vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32))) +uint32x4_t __arm_vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8))) +uint8x16_t __arm_vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8))) +uint8x16_t __arm_vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16))) +int16x8_t __arm_vabdq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16))) +int16x8_t __arm_vabdq(int16x8_t, int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32))) +int32x4_t __arm_vabdq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32))) +int32x4_t __arm_vabdq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8))) +int8x16_t __arm_vabdq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8))) +int8x16_t __arm_vabdq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16))) +uint16x8_t __arm_vabdq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16))) +uint16x8_t __arm_vabdq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32))) +uint32x4_t __arm_vabdq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32))) +uint32x4_t __arm_vabdq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8))) +uint8x16_t __arm_vabdq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8))) +uint8x16_t __arm_vabdq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16))) +int16x8_t __arm_vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16))) +int16x8_t __arm_vabdq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32))) +int32x4_t __arm_vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32))) +int32x4_t __arm_vabdq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8))) +int8x16_t __arm_vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8))) +int8x16_t __arm_vabdq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16))) +uint16x8_t __arm_vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16))) +uint16x8_t __arm_vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32))) +uint32x4_t __arm_vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32))) +uint32x4_t __arm_vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8))) +uint8x16_t __arm_vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8))) +uint8x16_t __arm_vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16))) +int16x8_t __arm_vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16))) +int16x8_t __arm_vabsq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32))) +int32x4_t __arm_vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32))) +int32x4_t __arm_vabsq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8))) +int8x16_t __arm_vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8))) +int8x16_t __arm_vabsq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16))) +int16x8_t __arm_vabsq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16))) +int16x8_t __arm_vabsq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32))) +int32x4_t __arm_vabsq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32))) +int32x4_t __arm_vabsq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8))) +int8x16_t __arm_vabsq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8))) +int8x16_t __arm_vabsq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16))) +int16x8_t __arm_vabsq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16))) +int16x8_t __arm_vabsq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32))) +int32x4_t __arm_vabsq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32))) +int32x4_t __arm_vabsq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8))) +int8x16_t __arm_vabsq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8))) +int8x16_t __arm_vabsq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32))) +int32x4_t __arm_vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32))) +int32x4_t __arm_vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32))) +uint32x4_t __arm_vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32))) +uint32x4_t __arm_vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32))) +int32x4_t __arm_vadciq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32))) +int32x4_t __arm_vadciq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32))) +uint32x4_t __arm_vadciq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32))) +uint32x4_t __arm_vadciq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32))) +int32x4_t __arm_vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32))) +int32x4_t __arm_vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32))) +uint32x4_t __arm_vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32))) +uint32x4_t __arm_vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32))) +int32x4_t __arm_vadcq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32))) +int32x4_t __arm_vadcq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32))) +uint32x4_t __arm_vadcq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32))) +uint32x4_t __arm_vadcq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32))) +int64_t __arm_vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32))) +int64_t __arm_vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32))) +uint64_t __arm_vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32))) +uint64_t __arm_vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32))) +int64_t __arm_vaddlvaq_s32(int64_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32))) +int64_t __arm_vaddlvaq(int64_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32))) +uint64_t __arm_vaddlvaq_u32(uint64_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32))) +uint64_t __arm_vaddlvaq(uint64_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32))) +int64_t __arm_vaddlvq_p_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32))) +int64_t __arm_vaddlvq_p(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32))) +uint64_t __arm_vaddlvq_p_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32))) +uint64_t __arm_vaddlvq_p(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32))) +int64_t __arm_vaddlvq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32))) +int64_t __arm_vaddlvq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32))) +uint64_t __arm_vaddlvq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32))) +uint64_t __arm_vaddlvq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16))) +int16x8_t __arm_vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16))) +int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32))) +int32x4_t __arm_vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32))) +int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8))) +int8x16_t __arm_vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8))) +int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16))) +uint16x8_t __arm_vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16))) +uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32))) +uint32x4_t __arm_vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32))) +uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8))) +uint8x16_t __arm_vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8))) +uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16))) +int16x8_t __arm_vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16))) +int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32))) +int32x4_t __arm_vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32))) +int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8))) +int8x16_t __arm_vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8))) +int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16))) +uint16x8_t __arm_vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16))) +uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32))) +uint32x4_t __arm_vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32))) +uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8))) +uint8x16_t __arm_vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8))) +uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16))) +int16x8_t __arm_vaddq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16))) +int16x8_t __arm_vaddq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32))) +int32x4_t __arm_vaddq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32))) +int32x4_t __arm_vaddq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8))) +int8x16_t __arm_vaddq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8))) +int8x16_t __arm_vaddq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16))) +uint16x8_t __arm_vaddq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16))) +uint16x8_t __arm_vaddq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32))) +uint32x4_t __arm_vaddq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32))) +uint32x4_t __arm_vaddq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8))) +uint8x16_t __arm_vaddq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8))) +uint8x16_t __arm_vaddq(uint8x16_t, 
uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16))) +int16x8_t __arm_vaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16))) +int16x8_t __arm_vaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32))) +int32x4_t __arm_vaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32))) +int32x4_t __arm_vaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8))) +int8x16_t __arm_vaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8))) +int8x16_t __arm_vaddq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16))) +uint16x8_t __arm_vaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16))) +uint16x8_t __arm_vaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32))) +uint32x4_t __arm_vaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32))) +uint32x4_t __arm_vaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8))) +uint8x16_t __arm_vaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8))) +uint8x16_t __arm_vaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16))) +int16x8_t __arm_vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16))) +int16x8_t __arm_vaddq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32))) +int32x4_t __arm_vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32))) +int32x4_t __arm_vaddq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8))) +int8x16_t __arm_vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8))) +int8x16_t __arm_vaddq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16))) +uint16x8_t __arm_vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16))) +uint16x8_t __arm_vaddq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32))) +uint32x4_t __arm_vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32))) +uint32x4_t __arm_vaddq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8))) +uint8x16_t __arm_vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8))) +uint8x16_t __arm_vaddq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16))) +int16x8_t __arm_vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16))) +int16x8_t __arm_vaddq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32))) +int32x4_t __arm_vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32))) +int32x4_t __arm_vaddq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8))) +int8x16_t __arm_vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8))) +int8x16_t __arm_vaddq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16))) +uint16x8_t __arm_vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16))) +uint16x8_t __arm_vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32))) +uint32x4_t __arm_vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32))) +uint32x4_t __arm_vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8))) +uint8x16_t __arm_vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8))) +uint8x16_t __arm_vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16))) +int32_t __arm_vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16))) +int32_t __arm_vaddvaq_p(int32_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32))) +int32_t __arm_vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32))) +int32_t __arm_vaddvaq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8))) +int32_t __arm_vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8))) +int32_t __arm_vaddvaq_p(int32_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16))) +uint32_t __arm_vaddvaq_p_u16(uint32_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16))) +uint32_t __arm_vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32))) +uint32_t __arm_vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32))) +uint32_t __arm_vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8))) +uint32_t __arm_vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8))) +uint32_t __arm_vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16))) +int32_t __arm_vaddvaq_s16(int32_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16))) +int32_t __arm_vaddvaq(int32_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32))) +int32_t __arm_vaddvaq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32))) +int32_t __arm_vaddvaq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8))) +int32_t __arm_vaddvaq_s8(int32_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8))) +int32_t __arm_vaddvaq(int32_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16))) +uint32_t __arm_vaddvaq_u16(uint32_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16))) +uint32_t __arm_vaddvaq(uint32_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32))) +uint32_t __arm_vaddvaq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32))) +uint32_t __arm_vaddvaq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8))) +uint32_t __arm_vaddvaq_u8(uint32_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8))) +uint32_t __arm_vaddvaq(uint32_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16))) +int32_t __arm_vaddvq_p_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16))) +int32_t __arm_vaddvq_p(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32))) +int32_t __arm_vaddvq_p_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32))) +int32_t __arm_vaddvq_p(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8))) +int32_t __arm_vaddvq_p_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8))) +int32_t __arm_vaddvq_p(int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16))) +uint32_t __arm_vaddvq_p_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16))) +uint32_t __arm_vaddvq_p(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32))) +uint32_t __arm_vaddvq_p_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32))) +uint32_t __arm_vaddvq_p(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8))) +uint32_t __arm_vaddvq_p_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8))) +uint32_t __arm_vaddvq_p(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16))) +int32_t __arm_vaddvq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16))) +int32_t __arm_vaddvq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32))) +int32_t __arm_vaddvq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32))) +int32_t __arm_vaddvq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8))) +int32_t __arm_vaddvq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8))) +int32_t __arm_vaddvq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16))) +uint32_t __arm_vaddvq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16))) +uint32_t __arm_vaddvq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32))) +uint32_t __arm_vaddvq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32))) +uint32_t __arm_vaddvq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8))) +uint32_t __arm_vaddvq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8))) +uint32_t __arm_vaddvq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16))) +int16x8_t __arm_vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16))) +int16x8_t __arm_vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32))) +int32x4_t __arm_vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32))) +int32x4_t __arm_vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8))) +int8x16_t __arm_vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8))) +int8x16_t __arm_vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16))) +uint16x8_t __arm_vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16))) +uint16x8_t __arm_vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32))) +uint32x4_t __arm_vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32))) +uint32x4_t __arm_vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8))) +uint8x16_t __arm_vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8))) +uint8x16_t __arm_vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16))) +int16x8_t __arm_vandq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16))) +int16x8_t __arm_vandq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32))) +int32x4_t __arm_vandq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32))) +int32x4_t __arm_vandq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8))) +int8x16_t __arm_vandq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8))) +int8x16_t __arm_vandq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16))) +uint16x8_t __arm_vandq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16))) +uint16x8_t __arm_vandq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32))) +uint32x4_t __arm_vandq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32))) +uint32x4_t __arm_vandq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8))) +uint8x16_t __arm_vandq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8))) +uint8x16_t __arm_vandq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16))) +int16x8_t __arm_vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16))) +int16x8_t __arm_vandq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32))) +int32x4_t __arm_vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32))) +int32x4_t __arm_vandq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8))) +int8x16_t __arm_vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8))) +int8x16_t __arm_vandq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16))) +uint16x8_t __arm_vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16))) +uint16x8_t __arm_vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32))) +uint32x4_t __arm_vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32))) +uint32x4_t __arm_vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8))) +uint8x16_t __arm_vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8))) +uint8x16_t __arm_vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16))) +int16x8_t __arm_vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16))) +int16x8_t __arm_vbicq_m_n(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32))) +int32x4_t __arm_vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32))) +int32x4_t __arm_vbicq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16))) +uint16x8_t __arm_vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16))) +uint16x8_t __arm_vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32))) +uint32x4_t __arm_vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32))) +uint32x4_t __arm_vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16))) +int16x8_t __arm_vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16))) +int16x8_t __arm_vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32))) +int32x4_t __arm_vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32))) +int32x4_t __arm_vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8))) +int8x16_t __arm_vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8))) +int8x16_t __arm_vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16))) +uint16x8_t __arm_vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16))) +uint16x8_t __arm_vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32))) +uint32x4_t __arm_vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32))) +uint32x4_t __arm_vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8))) +uint8x16_t __arm_vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8))) +uint8x16_t __arm_vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16))) +int16x8_t __arm_vbicq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16))) +int16x8_t __arm_vbicq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32))) +int32x4_t __arm_vbicq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32))) +int32x4_t __arm_vbicq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16))) +uint16x8_t __arm_vbicq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16))) +uint16x8_t __arm_vbicq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32))) +uint32x4_t __arm_vbicq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32))) +uint32x4_t __arm_vbicq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16))) +int16x8_t __arm_vbicq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16))) +int16x8_t __arm_vbicq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32))) +int32x4_t __arm_vbicq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32))) +int32x4_t __arm_vbicq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8))) +int8x16_t __arm_vbicq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8))) +int8x16_t __arm_vbicq(int8x16_t, int8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16))) +uint16x8_t __arm_vbicq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16))) +uint16x8_t __arm_vbicq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32))) +uint32x4_t __arm_vbicq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32))) +uint32x4_t __arm_vbicq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8))) +uint8x16_t __arm_vbicq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8))) +uint8x16_t __arm_vbicq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16))) +int16x8_t __arm_vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16))) +int16x8_t __arm_vbicq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32))) +int32x4_t __arm_vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32))) +int32x4_t __arm_vbicq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8))) +int8x16_t __arm_vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8))) +int8x16_t __arm_vbicq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16))) +uint16x8_t __arm_vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16))) +uint16x8_t __arm_vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32))) +uint32x4_t __arm_vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32))) +uint32x4_t __arm_vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8))) +uint8x16_t __arm_vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8))) +uint8x16_t __arm_vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16))) +int16x8_t __arm_vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16))) +int16x8_t __arm_vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32))) +int32x4_t __arm_vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32))) +int32x4_t 
__arm_vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8))) +int8x16_t __arm_vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8))) +int8x16_t __arm_vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16))) +uint16x8_t __arm_vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16))) +uint16x8_t __arm_vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32))) +uint32x4_t __arm_vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32))) +uint32x4_t __arm_vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8))) +uint8x16_t __arm_vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8))) +uint8x16_t __arm_vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16))) +int16x8_t __arm_vbrsrq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16))) +int16x8_t __arm_vbrsrq(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32))) +int32x4_t __arm_vbrsrq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32))) +int32x4_t __arm_vbrsrq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8))) +int8x16_t __arm_vbrsrq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8))) +int8x16_t __arm_vbrsrq(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16))) +uint16x8_t __arm_vbrsrq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16))) +uint16x8_t __arm_vbrsrq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32))) +uint32x4_t __arm_vbrsrq_n_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32))) +uint32x4_t __arm_vbrsrq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8))) +uint8x16_t __arm_vbrsrq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8))) +uint8x16_t __arm_vbrsrq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16))) +int16x8_t __arm_vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16))) +int16x8_t __arm_vbrsrq_x(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32))) +int32x4_t __arm_vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32))) +int32x4_t __arm_vbrsrq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8))) +int8x16_t __arm_vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8))) +int8x16_t __arm_vbrsrq_x(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16))) +uint16x8_t __arm_vbrsrq_x_n_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16))) +uint16x8_t __arm_vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32))) +uint32x4_t __arm_vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32))) +uint32x4_t __arm_vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8))) +uint8x16_t __arm_vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8))) +uint8x16_t __arm_vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16))) +int16x8_t __arm_vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16))) +int16x8_t __arm_vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32))) +int32x4_t __arm_vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32))) +int32x4_t __arm_vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8))) +int8x16_t __arm_vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8))) +int8x16_t __arm_vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16))) +uint16x8_t __arm_vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16))) +uint16x8_t __arm_vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32))) +uint32x4_t __arm_vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32))) +uint32x4_t __arm_vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8))) +uint8x16_t __arm_vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8))) +uint8x16_t __arm_vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16))) +int16x8_t __arm_vcaddq_rot270_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16))) +int16x8_t __arm_vcaddq_rot270(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32))) +int32x4_t __arm_vcaddq_rot270_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32))) +int32x4_t __arm_vcaddq_rot270(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8))) +int8x16_t __arm_vcaddq_rot270_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8))) +int8x16_t __arm_vcaddq_rot270(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16))) +uint16x8_t __arm_vcaddq_rot270_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16))) +uint16x8_t __arm_vcaddq_rot270(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32))) +uint32x4_t __arm_vcaddq_rot270_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32))) +uint32x4_t __arm_vcaddq_rot270(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8))) +uint8x16_t __arm_vcaddq_rot270_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8))) +uint8x16_t __arm_vcaddq_rot270(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16))) +int16x8_t __arm_vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16))) +int16x8_t __arm_vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32))) +int32x4_t __arm_vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32))) +int32x4_t __arm_vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8))) +int8x16_t __arm_vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8))) +int8x16_t __arm_vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16))) +uint16x8_t __arm_vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16))) +uint16x8_t __arm_vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32))) +uint32x4_t __arm_vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32))) +uint32x4_t __arm_vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8))) +uint8x16_t __arm_vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8))) +uint8x16_t __arm_vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16))) +int16x8_t __arm_vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16))) +int16x8_t __arm_vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32))) +int32x4_t __arm_vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32))) +int32x4_t __arm_vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8))) +int8x16_t __arm_vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8))) +int8x16_t __arm_vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16))) +uint16x8_t __arm_vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16))) +uint16x8_t __arm_vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32))) +uint32x4_t __arm_vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32))) +uint32x4_t __arm_vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8))) +uint8x16_t __arm_vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8))) +uint8x16_t __arm_vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16))) +int16x8_t __arm_vcaddq_rot90_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16))) +int16x8_t __arm_vcaddq_rot90(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32))) +int32x4_t __arm_vcaddq_rot90_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32))) +int32x4_t __arm_vcaddq_rot90(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8))) +int8x16_t __arm_vcaddq_rot90_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8))) +int8x16_t __arm_vcaddq_rot90(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16))) +uint16x8_t __arm_vcaddq_rot90_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16))) +uint16x8_t __arm_vcaddq_rot90(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32))) +uint32x4_t __arm_vcaddq_rot90_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32))) +uint32x4_t __arm_vcaddq_rot90(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8))) +uint8x16_t __arm_vcaddq_rot90_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8))) +uint8x16_t __arm_vcaddq_rot90(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16))) +int16x8_t __arm_vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16))) +int16x8_t __arm_vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32))) +int32x4_t __arm_vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32))) +int32x4_t __arm_vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8))) +int8x16_t __arm_vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8))) +int8x16_t __arm_vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16))) +uint16x8_t __arm_vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16))) +uint16x8_t __arm_vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32))) +uint32x4_t __arm_vcaddq_rot90_x_u32(uint32x4_t, 
uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32))) +uint32x4_t __arm_vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8))) +uint8x16_t __arm_vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8))) +uint8x16_t __arm_vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16))) +int16x8_t __arm_vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16))) +int16x8_t __arm_vclsq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32))) +int32x4_t __arm_vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32))) +int32x4_t __arm_vclsq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8))) +int8x16_t __arm_vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8))) +int8x16_t __arm_vclsq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16))) +int16x8_t __arm_vclsq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16))) +int16x8_t __arm_vclsq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32))) +int32x4_t __arm_vclsq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32))) +int32x4_t __arm_vclsq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8))) +int8x16_t __arm_vclsq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8))) +int8x16_t __arm_vclsq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16))) +int16x8_t __arm_vclsq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16))) +int16x8_t __arm_vclsq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32))) +int32x4_t __arm_vclsq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32))) +int32x4_t __arm_vclsq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8))) +int8x16_t __arm_vclsq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8))) +int8x16_t __arm_vclsq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16))) +int16x8_t __arm_vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16))) +int16x8_t __arm_vclzq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32))) +int32x4_t __arm_vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32))) +int32x4_t __arm_vclzq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8))) +int8x16_t __arm_vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8))) +int8x16_t __arm_vclzq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16))) +uint16x8_t __arm_vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16))) +uint16x8_t __arm_vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32))) +uint32x4_t __arm_vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32))) +uint32x4_t __arm_vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8))) +uint8x16_t __arm_vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8))) +uint8x16_t __arm_vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16))) +int16x8_t __arm_vclzq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16))) +int16x8_t __arm_vclzq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32))) +int32x4_t __arm_vclzq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32))) +int32x4_t __arm_vclzq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8))) +int8x16_t __arm_vclzq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8))) +int8x16_t __arm_vclzq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16))) +uint16x8_t __arm_vclzq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16))) +uint16x8_t __arm_vclzq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32))) +uint32x4_t __arm_vclzq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32))) +uint32x4_t __arm_vclzq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8))) +uint8x16_t __arm_vclzq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8))) +uint8x16_t __arm_vclzq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16))) +int16x8_t 
__arm_vclzq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16))) +int16x8_t __arm_vclzq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32))) +int32x4_t __arm_vclzq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32))) +int32x4_t __arm_vclzq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8))) +int8x16_t __arm_vclzq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8))) +int8x16_t __arm_vclzq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16))) +uint16x8_t __arm_vclzq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16))) +uint16x8_t __arm_vclzq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32))) +uint32x4_t __arm_vclzq_x_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32))) +uint32x4_t __arm_vclzq_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8))) +uint8x16_t __arm_vclzq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8))) +uint8x16_t __arm_vclzq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16))) +mve_pred16_t __arm_vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16))) +mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32))) +mve_pred16_t __arm_vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32))) +mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8))) +mve_pred16_t __arm_vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8))) +mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16))) +mve_pred16_t __arm_vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16))) +mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32))) +mve_pred16_t __arm_vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32))) +mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8))) +mve_pred16_t __arm_vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8))) +mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16))) +mve_pred16_t __arm_vcmpcsq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16))) +mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32))) +mve_pred16_t __arm_vcmpcsq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32))) +mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8))) +mve_pred16_t __arm_vcmpcsq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8))) +mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16))) +mve_pred16_t __arm_vcmpcsq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16))) +mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32))) +mve_pred16_t __arm_vcmpcsq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32))) +mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8))) +mve_pred16_t __arm_vcmpcsq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8))) +mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16))) +mve_pred16_t __arm_vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16))) +mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32))) +mve_pred16_t __arm_vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32))) +mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8))) +mve_pred16_t __arm_vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8))) +mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16))) +mve_pred16_t __arm_vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16))) +mve_pred16_t 
__arm_vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32))) +mve_pred16_t __arm_vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32))) +mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8))) +mve_pred16_t __arm_vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8))) +mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16))) +mve_pred16_t __arm_vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16))) +mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32))) +mve_pred16_t __arm_vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32))) +mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8))) +mve_pred16_t __arm_vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8))) +mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16))) +mve_pred16_t __arm_vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16))) +mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32))) +mve_pred16_t __arm_vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32))) +mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8))) +mve_pred16_t __arm_vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8))) +mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16))) +mve_pred16_t __arm_vcmpeqq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16))) +mve_pred16_t __arm_vcmpeqq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32))) +mve_pred16_t __arm_vcmpeqq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32))) +mve_pred16_t __arm_vcmpeqq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8))) 
+mve_pred16_t __arm_vcmpeqq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8))) +mve_pred16_t __arm_vcmpeqq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16))) +mve_pred16_t __arm_vcmpeqq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16))) +mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32))) +mve_pred16_t __arm_vcmpeqq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32))) +mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8))) +mve_pred16_t __arm_vcmpeqq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8))) +mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16))) +mve_pred16_t __arm_vcmpeqq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16))) +mve_pred16_t __arm_vcmpeqq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32))) +mve_pred16_t __arm_vcmpeqq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32))) +mve_pred16_t __arm_vcmpeqq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8))) +mve_pred16_t __arm_vcmpeqq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8))) +mve_pred16_t __arm_vcmpeqq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16))) +mve_pred16_t __arm_vcmpeqq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16))) +mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32))) +mve_pred16_t __arm_vcmpeqq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32))) +mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8))) +mve_pred16_t __arm_vcmpeqq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8))) +mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16))) +mve_pred16_t __arm_vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16))) +mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32))) +mve_pred16_t __arm_vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32))) +mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8))) +mve_pred16_t __arm_vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8))) +mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16))) +mve_pred16_t __arm_vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16))) +mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32))) +mve_pred16_t __arm_vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32))) +mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8))) +mve_pred16_t __arm_vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8))) +mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16))) +mve_pred16_t __arm_vcmpgeq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16))) +mve_pred16_t __arm_vcmpgeq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32))) +mve_pred16_t __arm_vcmpgeq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32))) +mve_pred16_t __arm_vcmpgeq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8))) +mve_pred16_t __arm_vcmpgeq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8))) +mve_pred16_t __arm_vcmpgeq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16))) +mve_pred16_t __arm_vcmpgeq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16))) +mve_pred16_t __arm_vcmpgeq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32))) +mve_pred16_t __arm_vcmpgeq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32))) +mve_pred16_t __arm_vcmpgeq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8))) +mve_pred16_t __arm_vcmpgeq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8))) +mve_pred16_t __arm_vcmpgeq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16))) +mve_pred16_t __arm_vcmpgtq_m_n_s16(int16x8_t, 
int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16))) +mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32))) +mve_pred16_t __arm_vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32))) +mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8))) +mve_pred16_t __arm_vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8))) +mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16))) +mve_pred16_t __arm_vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16))) +mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32))) +mve_pred16_t __arm_vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32))) +mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8))) +mve_pred16_t __arm_vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8))) +mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16))) +mve_pred16_t __arm_vcmpgtq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16))) +mve_pred16_t __arm_vcmpgtq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32))) +mve_pred16_t __arm_vcmpgtq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32))) +mve_pred16_t __arm_vcmpgtq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8))) +mve_pred16_t __arm_vcmpgtq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8))) +mve_pred16_t __arm_vcmpgtq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16))) +mve_pred16_t __arm_vcmpgtq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16))) +mve_pred16_t __arm_vcmpgtq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32))) +mve_pred16_t __arm_vcmpgtq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32))) +mve_pred16_t __arm_vcmpgtq(int32x4_t, int32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8))) +mve_pred16_t __arm_vcmpgtq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8))) +mve_pred16_t __arm_vcmpgtq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16))) +mve_pred16_t __arm_vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16))) +mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32))) +mve_pred16_t __arm_vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32))) +mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8))) +mve_pred16_t __arm_vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8))) +mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16))) +mve_pred16_t __arm_vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16))) +mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32))) +mve_pred16_t __arm_vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32))) +mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8))) +mve_pred16_t __arm_vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8))) +mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16))) +mve_pred16_t __arm_vcmphiq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16))) +mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32))) +mve_pred16_t __arm_vcmphiq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32))) +mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8))) +mve_pred16_t __arm_vcmphiq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8))) +mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16))) +mve_pred16_t __arm_vcmphiq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16))) +mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32))) +mve_pred16_t __arm_vcmphiq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32))) +mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8))) +mve_pred16_t __arm_vcmphiq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8))) +mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16))) +mve_pred16_t __arm_vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16))) +mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32))) +mve_pred16_t __arm_vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32))) +mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8))) +mve_pred16_t __arm_vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8))) +mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16))) +mve_pred16_t __arm_vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16))) +mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32))) +mve_pred16_t __arm_vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32))) +mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8))) +mve_pred16_t __arm_vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8))) +mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16))) +mve_pred16_t __arm_vcmpleq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16))) +mve_pred16_t __arm_vcmpleq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32))) +mve_pred16_t __arm_vcmpleq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32))) +mve_pred16_t __arm_vcmpleq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8))) +mve_pred16_t 
[... the hunk continues with hundreds of further machine-generated ARM MVE intrinsic declarations (clang's generated arm_mve.h header), covering the vcmpleq, vcmpltq, vcmpneq, vcreateq, vctp{8,16,32,64}q, vddupq, vdupq, vdwdupq, veorq, vgetq_lane, vhaddq, vhcaddq_rot90/rot270, vhsubq, and vidupq families across the s8/s16/s32/s64 and u8/u16/u32/u64 element types, in their plain, _m (merging), _n (scalar operand), _wb (writeback), and _x (don't-care predicated) variants; each intrinsic is declared twice, once as a type-suffixed alias of its builtin and once as an __overloadable__ polymorphic form, e.g.:

+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq(int32x4_t, int32x4_t);

... ]
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16))) +uint16x8_t __arm_vidupq_x_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32))) +uint32x4_t __arm_vidupq_x_n_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32))) +uint32x4_t __arm_vidupq_x_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8))) +uint8x16_t __arm_vidupq_x_n_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8))) +uint8x16_t __arm_vidupq_x_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16))) +uint16x8_t __arm_vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16))) +uint16x8_t __arm_vidupq_x_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32))) +uint32x4_t __arm_vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32))) +uint32x4_t __arm_vidupq_x_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8))) +uint8x16_t __arm_vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8))) +uint8x16_t __arm_vidupq_x_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16))) +uint16x8_t __arm_viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16))) +uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32))) +uint32x4_t __arm_viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32))) +uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8))) +uint8x16_t __arm_viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8))) +uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16))) +uint16x8_t __arm_viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16))) +uint16x8_t __arm_viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32))) +uint32x4_t __arm_viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32))) +uint32x4_t __arm_viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8))) +uint8x16_t __arm_viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8))) +uint8x16_t __arm_viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16))) +uint16x8_t __arm_viwdupq_n_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16))) +uint16x8_t __arm_viwdupq_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32))) +uint32x4_t __arm_viwdupq_n_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32))) +uint32x4_t __arm_viwdupq_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8))) +uint8x16_t __arm_viwdupq_n_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8))) +uint8x16_t __arm_viwdupq_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16))) +uint16x8_t __arm_viwdupq_wb_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16))) +uint16x8_t __arm_viwdupq_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32))) +uint32x4_t __arm_viwdupq_wb_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32))) +uint32x4_t __arm_viwdupq_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8))) +uint8x16_t __arm_viwdupq_wb_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8))) +uint8x16_t __arm_viwdupq_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16))) +uint16x8_t __arm_viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16))) +uint16x8_t __arm_viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32))) +uint32x4_t __arm_viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32))) +uint32x4_t __arm_viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8))) +uint8x16_t __arm_viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8))) +uint8x16_t 
__arm_viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16))) +uint16x8_t __arm_viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16))) +uint16x8_t __arm_viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32))) +uint32x4_t __arm_viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32))) +uint32x4_t __arm_viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8))) +uint8x16_t __arm_viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8))) +uint8x16_t __arm_viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16))) +int16x8_t __arm_vld1q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16))) +int16x8_t __arm_vld1q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32))) +int32x4_t __arm_vld1q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32))) +int32x4_t __arm_vld1q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8))) +int8x16_t __arm_vld1q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8))) +int8x16_t __arm_vld1q(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16))) +uint16x8_t __arm_vld1q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16))) +uint16x8_t __arm_vld1q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32))) +uint32x4_t __arm_vld1q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32))) +uint32x4_t __arm_vld1q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8))) +uint8x16_t __arm_vld1q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8))) +uint8x16_t __arm_vld1q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16))) +int16x8_t __arm_vld1q_z_s16(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16))) +int16x8_t __arm_vld1q_z(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32))) +int32x4_t __arm_vld1q_z_s32(const int32_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32))) +int32x4_t __arm_vld1q_z(const int32_t *, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8))) +int8x16_t __arm_vld1q_z_s8(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8))) +int8x16_t __arm_vld1q_z(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16))) +uint16x8_t __arm_vld1q_z_u16(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16))) +uint16x8_t __arm_vld1q_z(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32))) +uint32x4_t __arm_vld1q_z_u32(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32))) +uint32x4_t __arm_vld1q_z(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8))) +uint8x16_t __arm_vld1q_z_u8(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8))) +uint8x16_t __arm_vld1q_z(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16))) +int16x8x2_t __arm_vld2q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16))) +int16x8x2_t __arm_vld2q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32))) +int32x4x2_t __arm_vld2q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32))) +int32x4x2_t __arm_vld2q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8))) +int8x16x2_t __arm_vld2q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8))) +int8x16x2_t __arm_vld2q(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16))) +uint16x8x2_t __arm_vld2q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16))) +uint16x8x2_t __arm_vld2q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32))) +uint32x4x2_t __arm_vld2q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32))) +uint32x4x2_t __arm_vld2q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8))) +uint8x16x2_t __arm_vld2q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8))) +uint8x16x2_t __arm_vld2q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16))) +int16x8x4_t __arm_vld4q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16))) +int16x8x4_t __arm_vld4q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32))) +int32x4x4_t __arm_vld4q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32))) 
+int32x4x4_t __arm_vld4q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8))) +int8x16x4_t __arm_vld4q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8))) +int8x16x4_t __arm_vld4q(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16))) +uint16x8x4_t __arm_vld4q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16))) +uint16x8x4_t __arm_vld4q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32))) +uint32x4x4_t __arm_vld4q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32))) +uint32x4x4_t __arm_vld4q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8))) +uint8x16x4_t __arm_vld4q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8))) +uint8x16x4_t __arm_vld4q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16))) +int16x8_t __arm_vldrbq_gather_offset_s16(const int8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16))) +int16x8_t __arm_vldrbq_gather_offset(const int8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32))) +int32x4_t __arm_vldrbq_gather_offset_s32(const int8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32))) +int32x4_t __arm_vldrbq_gather_offset(const int8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8))) +int8x16_t __arm_vldrbq_gather_offset_s8(const int8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8))) +int8x16_t __arm_vldrbq_gather_offset(const int8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16))) +uint16x8_t __arm_vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16))) +uint16x8_t __arm_vldrbq_gather_offset(const uint8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32))) +uint32x4_t __arm_vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32))) +uint32x4_t __arm_vldrbq_gather_offset(const uint8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8))) +uint8x16_t __arm_vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8))) +uint8x16_t __arm_vldrbq_gather_offset(const uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16))) +int16x8_t 
__arm_vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16))) +int16x8_t __arm_vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32))) +int32x4_t __arm_vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32))) +int32x4_t __arm_vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8))) +int8x16_t __arm_vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8))) +int8x16_t __arm_vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16))) +uint16x8_t __arm_vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16))) +uint16x8_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32))) +uint32x4_t __arm_vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32))) +uint32x4_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8))) +uint8x16_t __arm_vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8))) +uint8x16_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16))) +int16x8_t __arm_vldrbq_s16(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32))) +int32x4_t __arm_vldrbq_s32(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8))) +int8x16_t __arm_vldrbq_s8(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16))) +uint16x8_t __arm_vldrbq_u16(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32))) +uint32x4_t __arm_vldrbq_u32(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8))) +uint8x16_t __arm_vldrbq_u8(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16))) +int16x8_t __arm_vldrbq_z_s16(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32))) +int32x4_t __arm_vldrbq_z_s32(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8))) +int8x16_t 
__arm_vldrbq_z_s8(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16))) +uint16x8_t __arm_vldrbq_z_u16(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32))) +uint32x4_t __arm_vldrbq_z_u32(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8))) +uint8x16_t __arm_vldrbq_z_u8(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64))) +int64x2_t __arm_vldrdq_gather_base_s64(uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64))) +uint64x2_t __arm_vldrdq_gather_base_u64(uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64))) +int64x2_t __arm_vldrdq_gather_base_wb_s64(uint64x2_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64))) +uint64x2_t __arm_vldrdq_gather_base_wb_u64(uint64x2_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64))) +int64x2_t __arm_vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64))) +uint64x2_t __arm_vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64))) +int64x2_t __arm_vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64))) +uint64x2_t __arm_vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64))) +int64x2_t __arm_vldrdq_gather_offset_s64(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64))) +int64x2_t __arm_vldrdq_gather_offset(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64))) +uint64x2_t __arm_vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64))) +uint64x2_t __arm_vldrdq_gather_offset(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64))) +int64x2_t __arm_vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64))) +int64x2_t __arm_vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64))) +uint64x2_t __arm_vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64))) +uint64x2_t __arm_vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64))) +int64x2_t __arm_vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64))) +int64x2_t __arm_vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64))) +uint64x2_t __arm_vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64))) +uint64x2_t __arm_vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64))) +int64x2_t __arm_vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64))) +int64x2_t __arm_vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64))) +uint64x2_t __arm_vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64))) +uint64x2_t __arm_vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16))) +int16x8_t __arm_vldrhq_gather_offset_s16(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16))) +int16x8_t __arm_vldrhq_gather_offset(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32))) +int32x4_t __arm_vldrhq_gather_offset_s32(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32))) +int32x4_t __arm_vldrhq_gather_offset(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16))) +uint16x8_t __arm_vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16))) +uint16x8_t __arm_vldrhq_gather_offset(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32))) +uint32x4_t __arm_vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32))) +uint32x4_t __arm_vldrhq_gather_offset(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16))) +int16x8_t __arm_vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16))) +int16x8_t __arm_vldrhq_gather_offset_z(const int16_t *, uint16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32))) +int32x4_t __arm_vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32))) +int32x4_t __arm_vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16))) +uint16x8_t __arm_vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16))) +uint16x8_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32))) +uint32x4_t __arm_vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32))) +uint32x4_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16))) +int16x8_t __arm_vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16))) +int16x8_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32))) +int32x4_t __arm_vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32))) +int32x4_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16))) +uint16x8_t __arm_vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16))) +uint16x8_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32))) +uint32x4_t __arm_vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32))) +uint32x4_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16))) +int16x8_t __arm_vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16))) +int16x8_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32))) +int32x4_t __arm_vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32))) +int32x4_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16))) +uint16x8_t __arm_vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16))) +uint16x8_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32))) +uint32x4_t __arm_vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32))) +uint32x4_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16))) +int16x8_t __arm_vldrhq_s16(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32))) +int32x4_t __arm_vldrhq_s32(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16))) +uint16x8_t __arm_vldrhq_u16(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32))) +uint32x4_t __arm_vldrhq_u32(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16))) +int16x8_t __arm_vldrhq_z_s16(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32))) +int32x4_t __arm_vldrhq_z_s32(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16))) +uint16x8_t __arm_vldrhq_z_u16(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32))) +uint32x4_t __arm_vldrhq_z_u32(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32))) +int32x4_t __arm_vldrwq_gather_base_s32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32))) +uint32x4_t __arm_vldrwq_gather_base_u32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32))) +int32x4_t __arm_vldrwq_gather_base_wb_s32(uint32x4_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32))) +uint32x4_t __arm_vldrwq_gather_base_wb_u32(uint32x4_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32))) +int32x4_t __arm_vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32))) +uint32x4_t __arm_vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32))) +int32x4_t __arm_vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32))) +uint32x4_t __arm_vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32))) +int32x4_t __arm_vldrwq_gather_offset_s32(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32))) +int32x4_t __arm_vldrwq_gather_offset(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32))) +uint32x4_t __arm_vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32))) +uint32x4_t __arm_vldrwq_gather_offset(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32))) +int32x4_t __arm_vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32))) +int32x4_t __arm_vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32))) +uint32x4_t __arm_vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32))) +uint32x4_t __arm_vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32))) +int32x4_t __arm_vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32))) +int32x4_t __arm_vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32))) +uint32x4_t __arm_vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32))) +uint32x4_t __arm_vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32))) +int32x4_t __arm_vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32))) +int32x4_t __arm_vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32))) +uint32x4_t __arm_vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32))) +uint32x4_t __arm_vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32))) +int32x4_t __arm_vldrwq_s32(const int32_t *); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32))) +uint32x4_t __arm_vldrwq_u32(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32))) +int32x4_t __arm_vldrwq_z_s32(const int32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32))) +uint32x4_t __arm_vldrwq_z_u32(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16))) +uint16x8_t __arm_vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16))) +uint16x8_t __arm_vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32))) +uint32x4_t __arm_vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32))) +uint32x4_t __arm_vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8))) +uint8x16_t __arm_vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8))) +uint8x16_t __arm_vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16))) +uint16x8_t __arm_vmaxaq_s16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16))) +uint16x8_t __arm_vmaxaq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32))) +uint32x4_t __arm_vmaxaq_s32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32))) +uint32x4_t __arm_vmaxaq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8))) +uint8x16_t __arm_vmaxaq_s8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8))) +uint8x16_t __arm_vmaxaq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16))) +uint16_t __arm_vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16))) +uint16_t __arm_vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32))) +uint32_t __arm_vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32))) +uint32_t __arm_vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8))) +uint8_t __arm_vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8))) +uint8_t __arm_vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16))) +uint16_t __arm_vmaxavq_s16(uint16_t, int16x8_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16))) +uint16_t __arm_vmaxavq(uint16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32))) +uint32_t __arm_vmaxavq_s32(uint32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32))) +uint32_t __arm_vmaxavq(uint32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8))) +uint8_t __arm_vmaxavq_s8(uint8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8))) +uint8_t __arm_vmaxavq(uint8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16))) +int16x8_t __arm_vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16))) +int16x8_t __arm_vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32))) +int32x4_t __arm_vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32))) +int32x4_t __arm_vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8))) +int8x16_t __arm_vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8))) +int8x16_t __arm_vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16))) +uint16x8_t __arm_vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16))) +uint16x8_t __arm_vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32))) +uint32x4_t __arm_vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32))) +uint32x4_t __arm_vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8))) +uint8x16_t __arm_vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8))) +uint8x16_t __arm_vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16))) +int16x8_t __arm_vmaxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16))) +int16x8_t __arm_vmaxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32))) +int32x4_t __arm_vmaxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32))) +int32x4_t __arm_vmaxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8))) 
+int8x16_t __arm_vmaxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8))) +int8x16_t __arm_vmaxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16))) +uint16x8_t __arm_vmaxq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16))) +uint16x8_t __arm_vmaxq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32))) +uint32x4_t __arm_vmaxq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32))) +uint32x4_t __arm_vmaxq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8))) +uint8x16_t __arm_vmaxq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8))) +uint8x16_t __arm_vmaxq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16))) +int16x8_t __arm_vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16))) +int16x8_t __arm_vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32))) +int32x4_t __arm_vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32))) +int32x4_t __arm_vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8))) +int8x16_t __arm_vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8))) +int8x16_t __arm_vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16))) +uint16x8_t __arm_vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16))) +uint16x8_t __arm_vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32))) +uint32x4_t __arm_vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32))) +uint32x4_t __arm_vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8))) +uint8x16_t __arm_vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8))) +uint8x16_t __arm_vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16))) +int16_t __arm_vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16))) +int16_t __arm_vmaxvq_p(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32))) +int32_t 
__arm_vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32))) +int32_t __arm_vmaxvq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8))) +int8_t __arm_vmaxvq_p_s8(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8))) +int8_t __arm_vmaxvq_p(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16))) +uint16_t __arm_vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16))) +uint16_t __arm_vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32))) +uint32_t __arm_vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32))) +uint32_t __arm_vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8))) +uint8_t __arm_vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8))) +uint8_t __arm_vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16))) +int16_t __arm_vmaxvq_s16(int16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16))) +int16_t __arm_vmaxvq(int16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32))) +int32_t __arm_vmaxvq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32))) +int32_t __arm_vmaxvq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8))) +int8_t __arm_vmaxvq_s8(int8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8))) +int8_t __arm_vmaxvq(int8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16))) +uint16_t __arm_vmaxvq_u16(uint16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16))) +uint16_t __arm_vmaxvq(uint16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32))) +uint32_t __arm_vmaxvq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32))) +uint32_t __arm_vmaxvq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8))) +uint8_t __arm_vmaxvq_u8(uint8_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8))) +uint8_t __arm_vmaxvq(uint8_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16))) +uint16x8_t __arm_vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16))) +uint16x8_t __arm_vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32))) +uint32x4_t __arm_vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32))) +uint32x4_t __arm_vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8))) +uint8x16_t __arm_vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8))) +uint8x16_t __arm_vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16))) +uint16x8_t __arm_vminaq_s16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16))) +uint16x8_t __arm_vminaq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32))) +uint32x4_t __arm_vminaq_s32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32))) +uint32x4_t __arm_vminaq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8))) +uint8x16_t __arm_vminaq_s8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8))) +uint8x16_t __arm_vminaq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16))) +uint16_t __arm_vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16))) +uint16_t __arm_vminavq_p(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32))) +uint32_t __arm_vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32))) +uint32_t __arm_vminavq_p(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8))) +uint8_t __arm_vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8))) +uint8_t __arm_vminavq_p(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16))) +uint16_t __arm_vminavq_s16(uint16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16))) +uint16_t __arm_vminavq(uint16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32))) +uint32_t __arm_vminavq_s32(uint32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32))) +uint32_t __arm_vminavq(uint32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8))) +uint8_t __arm_vminavq_s8(uint8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8))) +uint8_t __arm_vminavq(uint8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16))) +int16x8_t __arm_vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16))) +int16x8_t __arm_vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32))) +int32x4_t __arm_vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32))) +int32x4_t __arm_vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8))) +int8x16_t __arm_vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8))) +int8x16_t __arm_vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16))) +uint16x8_t __arm_vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16))) +uint16x8_t __arm_vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32))) +uint32x4_t __arm_vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32))) +uint32x4_t __arm_vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8))) +uint8x16_t __arm_vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8))) +uint8x16_t __arm_vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16))) +int16x8_t __arm_vminq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16))) +int16x8_t __arm_vminq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32))) +int32x4_t __arm_vminq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32))) +int32x4_t __arm_vminq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8))) +int8x16_t __arm_vminq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8))) +int8x16_t __arm_vminq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16))) +uint16x8_t __arm_vminq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16))) +uint16x8_t __arm_vminq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32))) +uint32x4_t 
__arm_vminq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32))) +uint32x4_t __arm_vminq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8))) +uint8x16_t __arm_vminq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8))) +uint8x16_t __arm_vminq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16))) +int16x8_t __arm_vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16))) +int16x8_t __arm_vminq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32))) +int32x4_t __arm_vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32))) +int32x4_t __arm_vminq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8))) +int8x16_t __arm_vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8))) +int8x16_t __arm_vminq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16))) +uint16x8_t __arm_vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16))) +uint16x8_t __arm_vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32))) +uint32x4_t __arm_vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32))) +uint32x4_t __arm_vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8))) +uint8x16_t __arm_vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8))) +uint8x16_t __arm_vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16))) +int16_t __arm_vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16))) +int16_t __arm_vminvq_p(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32))) +int32_t __arm_vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32))) +int32_t __arm_vminvq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8))) +int8_t __arm_vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8))) +int8_t __arm_vminvq_p(int8_t, int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16))) +uint16_t __arm_vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16))) +uint16_t __arm_vminvq_p(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32))) +uint32_t __arm_vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32))) +uint32_t __arm_vminvq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8))) +uint8_t __arm_vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8))) +uint8_t __arm_vminvq_p(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16))) +int16_t __arm_vminvq_s16(int16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16))) +int16_t __arm_vminvq(int16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32))) +int32_t __arm_vminvq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32))) +int32_t __arm_vminvq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8))) +int8_t __arm_vminvq_s8(int8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8))) +int8_t __arm_vminvq(int8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16))) +uint16_t __arm_vminvq_u16(uint16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16))) +uint16_t __arm_vminvq(uint16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32))) +uint32_t __arm_vminvq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32))) +uint32_t __arm_vminvq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8))) +uint8_t __arm_vminvq_u8(uint8_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8))) +uint8_t __arm_vminvq(uint8_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16))) +int32_t __arm_vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16))) +int32_t __arm_vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32))) +int32_t __arm_vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32))) +int32_t __arm_vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8))) 
+int32_t __arm_vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8))) +int32_t __arm_vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16))) +uint32_t __arm_vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16))) +uint32_t __arm_vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32))) +uint32_t __arm_vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32))) +uint32_t __arm_vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8))) +uint32_t __arm_vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8))) +uint32_t __arm_vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16))) +int32_t __arm_vmladavaq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16))) +int32_t __arm_vmladavaq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32))) +int32_t __arm_vmladavaq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32))) +int32_t __arm_vmladavaq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8))) +int32_t __arm_vmladavaq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8))) +int32_t __arm_vmladavaq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16))) +uint32_t __arm_vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16))) +uint32_t __arm_vmladavaq(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32))) +uint32_t __arm_vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32))) +uint32_t __arm_vmladavaq(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8))) +uint32_t __arm_vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8))) +uint32_t __arm_vmladavaq(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16))) +int32_t __arm_vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16))) +int32_t __arm_vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32))) +int32_t __arm_vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32))) +int32_t __arm_vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8))) +int32_t __arm_vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8))) +int32_t __arm_vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16))) +int32_t __arm_vmladavaxq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16))) +int32_t __arm_vmladavaxq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32))) +int32_t __arm_vmladavaxq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32))) +int32_t __arm_vmladavaxq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8))) +int32_t __arm_vmladavaxq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8))) +int32_t __arm_vmladavaxq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16))) +int32_t __arm_vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16))) +int32_t __arm_vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32))) +int32_t __arm_vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32))) +int32_t __arm_vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8))) +int32_t __arm_vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8))) +int32_t __arm_vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16))) +uint32_t __arm_vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16))) +uint32_t __arm_vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32))) +uint32_t __arm_vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32))) +uint32_t 
__arm_vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8))) +uint32_t __arm_vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8))) +uint32_t __arm_vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16))) +int32_t __arm_vmladavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16))) +int32_t __arm_vmladavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32))) +int32_t __arm_vmladavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32))) +int32_t __arm_vmladavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8))) +int32_t __arm_vmladavq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8))) +int32_t __arm_vmladavq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16))) +uint32_t __arm_vmladavq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16))) +uint32_t __arm_vmladavq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32))) +uint32_t __arm_vmladavq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32))) +uint32_t __arm_vmladavq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8))) +uint32_t __arm_vmladavq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8))) +uint32_t __arm_vmladavq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16))) +int32_t __arm_vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16))) +int32_t __arm_vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32))) +int32_t __arm_vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32))) +int32_t __arm_vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8))) +int32_t __arm_vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8))) +int32_t __arm_vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16))) +int32_t __arm_vmladavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16))) +int32_t 
__arm_vmladavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32))) +int32_t __arm_vmladavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32))) +int32_t __arm_vmladavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8))) +int32_t __arm_vmladavxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8))) +int32_t __arm_vmladavxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16))) +int64_t __arm_vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16))) +int64_t __arm_vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32))) +int64_t __arm_vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32))) +int64_t __arm_vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16))) +uint64_t __arm_vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16))) +uint64_t __arm_vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32))) +uint64_t __arm_vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32))) +uint64_t __arm_vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16))) +int64_t __arm_vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16))) +int64_t __arm_vmlaldavaq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32))) +int64_t __arm_vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32))) +int64_t __arm_vmlaldavaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16))) +uint64_t __arm_vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16))) +uint64_t __arm_vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32))) +uint64_t __arm_vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32))) +uint64_t __arm_vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16))) +int64_t __arm_vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16))) +int64_t __arm_vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32))) +int64_t __arm_vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32))) +int64_t __arm_vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16))) +int64_t __arm_vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16))) +int64_t __arm_vmlaldavaxq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32))) +int64_t __arm_vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32))) +int64_t __arm_vmlaldavaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16))) +int64_t __arm_vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16))) +int64_t __arm_vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32))) +int64_t __arm_vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32))) +int64_t __arm_vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16))) +uint64_t __arm_vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16))) +uint64_t __arm_vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32))) +uint64_t __arm_vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32))) +uint64_t __arm_vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16))) +int64_t __arm_vmlaldavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16))) +int64_t __arm_vmlaldavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32))) +int64_t __arm_vmlaldavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32))) +int64_t __arm_vmlaldavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16))) +uint64_t __arm_vmlaldavq_u16(uint16x8_t, uint16x8_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16))) +uint64_t __arm_vmlaldavq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32))) +uint64_t __arm_vmlaldavq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32))) +uint64_t __arm_vmlaldavq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16))) +int64_t __arm_vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16))) +int64_t __arm_vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32))) +int64_t __arm_vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32))) +int64_t __arm_vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16))) +int64_t __arm_vmlaldavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16))) +int64_t __arm_vmlaldavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32))) +int64_t __arm_vmlaldavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32))) +int64_t __arm_vmlaldavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16))) +int16x8_t __arm_vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16))) +int16x8_t __arm_vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32))) +int32x4_t __arm_vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32))) +int32x4_t __arm_vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8))) +int8x16_t __arm_vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8))) +int8x16_t __arm_vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16))) +uint16x8_t __arm_vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16))) +uint16x8_t __arm_vmlaq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32))) +uint32x4_t __arm_vmlaq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32))) +uint32x4_t __arm_vmlaq_m(uint32x4_t, 
uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8))) +uint8x16_t __arm_vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8))) +uint8x16_t __arm_vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16))) +int16x8_t __arm_vmlaq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16))) +int16x8_t __arm_vmlaq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32))) +int32x4_t __arm_vmlaq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32))) +int32x4_t __arm_vmlaq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8))) +int8x16_t __arm_vmlaq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8))) +int8x16_t __arm_vmlaq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16))) +uint16x8_t __arm_vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16))) +uint16x8_t __arm_vmlaq(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32))) +uint32x4_t __arm_vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32))) +uint32x4_t __arm_vmlaq(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8))) +uint8x16_t __arm_vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8))) +uint8x16_t __arm_vmlaq(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16))) +int16x8_t __arm_vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16))) +int16x8_t __arm_vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32))) +int32x4_t __arm_vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32))) +int32x4_t __arm_vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8))) +int8x16_t __arm_vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8))) +int8x16_t __arm_vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16))) +uint16x8_t __arm_vmlasq_m_n_u16(uint16x8_t, 
uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16))) +uint16x8_t __arm_vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32))) +uint32x4_t __arm_vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32))) +uint32x4_t __arm_vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8))) +uint8x16_t __arm_vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8))) +uint8x16_t __arm_vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16))) +int16x8_t __arm_vmlasq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16))) +int16x8_t __arm_vmlasq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32))) +int32x4_t __arm_vmlasq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32))) +int32x4_t __arm_vmlasq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8))) +int8x16_t __arm_vmlasq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8))) +int8x16_t __arm_vmlasq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16))) +uint16x8_t __arm_vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16))) +uint16x8_t __arm_vmlasq(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32))) +uint32x4_t __arm_vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32))) +uint32x4_t __arm_vmlasq(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8))) +uint8x16_t __arm_vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8))) +uint8x16_t __arm_vmlasq(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16))) +int32_t __arm_vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16))) +int32_t __arm_vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32))) +int32_t __arm_vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32))) +int32_t __arm_vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8))) +int32_t __arm_vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8))) +int32_t __arm_vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16))) +int32_t __arm_vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16))) +int32_t __arm_vmlsdavaq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32))) +int32_t __arm_vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32))) +int32_t __arm_vmlsdavaq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8))) +int32_t __arm_vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8))) +int32_t __arm_vmlsdavaq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16))) +int32_t __arm_vmlsdavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16))) +int32_t __arm_vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32))) +int32_t __arm_vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32))) +int32_t __arm_vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8))) +int32_t __arm_vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8))) +int32_t __arm_vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16))) +int32_t __arm_vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16))) +int32_t __arm_vmlsdavaxq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32))) +int32_t __arm_vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32))) +int32_t __arm_vmlsdavaxq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8))) +int32_t __arm_vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8))) +int32_t __arm_vmlsdavaxq(int32_t, int8x16_t, int8x16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16))) +int32_t __arm_vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16))) +int32_t __arm_vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32))) +int32_t __arm_vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32))) +int32_t __arm_vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8))) +int32_t __arm_vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8))) +int32_t __arm_vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16))) +int32_t __arm_vmlsdavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16))) +int32_t __arm_vmlsdavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32))) +int32_t __arm_vmlsdavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32))) +int32_t __arm_vmlsdavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8))) +int32_t __arm_vmlsdavq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8))) +int32_t __arm_vmlsdavq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16))) +int32_t __arm_vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16))) +int32_t __arm_vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32))) +int32_t __arm_vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32))) +int32_t __arm_vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8))) +int32_t __arm_vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8))) +int32_t __arm_vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16))) +int32_t __arm_vmlsdavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16))) +int32_t __arm_vmlsdavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32))) +int32_t __arm_vmlsdavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32))) +int32_t 
__arm_vmlsdavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8))) +int32_t __arm_vmlsdavxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8))) +int32_t __arm_vmlsdavxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16))) +int64_t __arm_vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16))) +int64_t __arm_vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32))) +int64_t __arm_vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32))) +int64_t __arm_vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16))) +int64_t __arm_vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16))) +int64_t __arm_vmlsldavaq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32))) +int64_t __arm_vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32))) +int64_t __arm_vmlsldavaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16))) +int64_t __arm_vmlsldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16))) +int64_t __arm_vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32))) +int64_t __arm_vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32))) +int64_t __arm_vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16))) +int64_t __arm_vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16))) +int64_t __arm_vmlsldavaxq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32))) +int64_t __arm_vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32))) +int64_t __arm_vmlsldavaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16))) +int64_t __arm_vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16))) +int64_t __arm_vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32))) +int64_t __arm_vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32))) +int64_t __arm_vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16))) +int64_t __arm_vmlsldavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16))) +int64_t __arm_vmlsldavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32))) +int64_t __arm_vmlsldavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32))) +int64_t __arm_vmlsldavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16))) +int64_t __arm_vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16))) +int64_t __arm_vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32))) +int64_t __arm_vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32))) +int64_t __arm_vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16))) +int64_t __arm_vmlsldavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16))) +int64_t __arm_vmlsldavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32))) +int64_t __arm_vmlsldavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32))) +int64_t __arm_vmlsldavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16))) +int32x4_t __arm_vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16))) +int32x4_t __arm_vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8))) +int16x8_t __arm_vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8))) +int16x8_t __arm_vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16))) +uint32x4_t __arm_vmovlbq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16))) +uint32x4_t __arm_vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8))) +uint16x8_t __arm_vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8))) +uint16x8_t __arm_vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16))) +int32x4_t __arm_vmovlbq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16))) +int32x4_t __arm_vmovlbq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8))) +int16x8_t __arm_vmovlbq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8))) +int16x8_t __arm_vmovlbq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16))) +uint32x4_t __arm_vmovlbq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16))) +uint32x4_t __arm_vmovlbq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8))) +uint16x8_t __arm_vmovlbq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8))) +uint16x8_t __arm_vmovlbq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16))) +int32x4_t __arm_vmovlbq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16))) +int32x4_t __arm_vmovlbq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8))) +int16x8_t __arm_vmovlbq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8))) +int16x8_t __arm_vmovlbq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16))) +uint32x4_t __arm_vmovlbq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16))) +uint32x4_t __arm_vmovlbq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8))) +uint16x8_t __arm_vmovlbq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8))) +uint16x8_t __arm_vmovlbq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16))) +int32x4_t __arm_vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16))) +int32x4_t __arm_vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8))) +int16x8_t __arm_vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8))) +int16x8_t __arm_vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16))) +uint32x4_t __arm_vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16))) +uint32x4_t __arm_vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8))) +uint16x8_t __arm_vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8))) +uint16x8_t __arm_vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16))) +int32x4_t __arm_vmovltq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16))) +int32x4_t __arm_vmovltq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8))) +int16x8_t __arm_vmovltq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8))) +int16x8_t __arm_vmovltq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16))) +uint32x4_t __arm_vmovltq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16))) +uint32x4_t __arm_vmovltq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8))) +uint16x8_t __arm_vmovltq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8))) +uint16x8_t __arm_vmovltq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16))) +int32x4_t __arm_vmovltq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16))) +int32x4_t __arm_vmovltq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8))) +int16x8_t __arm_vmovltq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8))) +int16x8_t __arm_vmovltq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16))) +uint32x4_t __arm_vmovltq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16))) +uint32x4_t __arm_vmovltq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8))) +uint16x8_t __arm_vmovltq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8))) +uint16x8_t __arm_vmovltq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16))) +int8x16_t __arm_vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16))) +int8x16_t __arm_vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32))) +int16x8_t __arm_vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32))) +int16x8_t __arm_vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16))) +uint8x16_t 
__arm_vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16))) +uint8x16_t __arm_vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32))) +uint16x8_t __arm_vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32))) +uint16x8_t __arm_vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16))) +int8x16_t __arm_vmovnbq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16))) +int8x16_t __arm_vmovnbq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32))) +int16x8_t __arm_vmovnbq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32))) +int16x8_t __arm_vmovnbq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16))) +uint8x16_t __arm_vmovnbq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16))) +uint8x16_t __arm_vmovnbq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32))) +uint16x8_t __arm_vmovnbq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32))) +uint16x8_t __arm_vmovnbq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16))) +int8x16_t __arm_vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16))) +int8x16_t __arm_vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32))) +int16x8_t __arm_vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32))) +int16x8_t __arm_vmovntq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16))) +uint8x16_t __arm_vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16))) +uint8x16_t __arm_vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32))) +uint16x8_t __arm_vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32))) +uint16x8_t __arm_vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16))) +int8x16_t __arm_vmovntq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16))) +int8x16_t __arm_vmovntq(int8x16_t, int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32))) +int16x8_t __arm_vmovntq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32))) +int16x8_t __arm_vmovntq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16))) +uint8x16_t __arm_vmovntq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16))) +uint8x16_t __arm_vmovntq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32))) +uint16x8_t __arm_vmovntq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32))) +uint16x8_t __arm_vmovntq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16))) +int16x8_t __arm_vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16))) +int16x8_t __arm_vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32))) +int32x4_t __arm_vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32))) +int32x4_t __arm_vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8))) +int8x16_t __arm_vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8))) +int8x16_t __arm_vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16))) +uint16x8_t __arm_vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16))) +uint16x8_t __arm_vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32))) +uint32x4_t __arm_vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32))) +uint32x4_t __arm_vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8))) +uint8x16_t __arm_vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8))) +uint8x16_t __arm_vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16))) +int16x8_t __arm_vmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16))) +int16x8_t __arm_vmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32))) +int32x4_t __arm_vmulhq_s32(int32x4_t, int32x4_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32))) +int32x4_t __arm_vmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8))) +int8x16_t __arm_vmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8))) +int8x16_t __arm_vmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16))) +uint16x8_t __arm_vmulhq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16))) +uint16x8_t __arm_vmulhq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32))) +uint32x4_t __arm_vmulhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32))) +uint32x4_t __arm_vmulhq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8))) +uint8x16_t __arm_vmulhq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8))) +uint8x16_t __arm_vmulhq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16))) +int16x8_t __arm_vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16))) +int16x8_t __arm_vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32))) +int32x4_t __arm_vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32))) +int32x4_t __arm_vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8))) +int8x16_t __arm_vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8))) +int8x16_t __arm_vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16))) +uint16x8_t __arm_vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16))) +uint16x8_t __arm_vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32))) +uint32x4_t __arm_vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32))) +uint32x4_t __arm_vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8))) +uint8x16_t __arm_vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8))) +uint8x16_t __arm_vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16))) +int32x4_t __arm_vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16))) +int32x4_t __arm_vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32))) +int64x2_t __arm_vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32))) +int64x2_t __arm_vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8))) +int16x8_t __arm_vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8))) +int16x8_t __arm_vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16))) +uint32x4_t __arm_vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16))) +uint32x4_t __arm_vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32))) +uint64x2_t __arm_vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32))) +uint64x2_t __arm_vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8))) +uint16x8_t __arm_vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8))) +uint16x8_t __arm_vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16))) +int32x4_t __arm_vmullbq_int_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16))) +int32x4_t __arm_vmullbq_int(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32))) +int64x2_t __arm_vmullbq_int_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32))) +int64x2_t __arm_vmullbq_int(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8))) +int16x8_t __arm_vmullbq_int_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8))) +int16x8_t __arm_vmullbq_int(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16))) +uint32x4_t __arm_vmullbq_int_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16))) +uint32x4_t __arm_vmullbq_int(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32))) +uint64x2_t __arm_vmullbq_int_u32(uint32x4_t, 
uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32))) +uint64x2_t __arm_vmullbq_int(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8))) +uint16x8_t __arm_vmullbq_int_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8))) +uint16x8_t __arm_vmullbq_int(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16))) +int32x4_t __arm_vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16))) +int32x4_t __arm_vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32))) +int64x2_t __arm_vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32))) +int64x2_t __arm_vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8))) +int16x8_t __arm_vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8))) +int16x8_t __arm_vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16))) +uint32x4_t __arm_vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16))) +uint32x4_t __arm_vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32))) +uint64x2_t __arm_vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32))) +uint64x2_t __arm_vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8))) +uint16x8_t __arm_vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8))) +uint16x8_t __arm_vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16))) +uint32x4_t __arm_vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16))) +uint32x4_t __arm_vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8))) +uint16x8_t __arm_vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8))) +uint16x8_t __arm_vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16))) +uint32x4_t 
__arm_vmullbq_poly_p16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16))) +uint32x4_t __arm_vmullbq_poly(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8))) +uint16x8_t __arm_vmullbq_poly_p8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8))) +uint16x8_t __arm_vmullbq_poly(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16))) +uint32x4_t __arm_vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16))) +uint32x4_t __arm_vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8))) +uint16x8_t __arm_vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8))) +uint16x8_t __arm_vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16))) +int32x4_t __arm_vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16))) +int32x4_t __arm_vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32))) +int64x2_t __arm_vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32))) +int64x2_t __arm_vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8))) +int16x8_t __arm_vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8))) +int16x8_t __arm_vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16))) +uint32x4_t __arm_vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16))) +uint32x4_t __arm_vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32))) +uint64x2_t __arm_vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32))) +uint64x2_t __arm_vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8))) +uint16x8_t __arm_vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8))) +uint16x8_t __arm_vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16))) +int32x4_t __arm_vmulltq_int_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16))) +int32x4_t __arm_vmulltq_int(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32))) +int64x2_t __arm_vmulltq_int_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32))) +int64x2_t __arm_vmulltq_int(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8))) +int16x8_t __arm_vmulltq_int_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8))) +int16x8_t __arm_vmulltq_int(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16))) +uint32x4_t __arm_vmulltq_int_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16))) +uint32x4_t __arm_vmulltq_int(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32))) +uint64x2_t __arm_vmulltq_int_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32))) +uint64x2_t __arm_vmulltq_int(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8))) +uint16x8_t __arm_vmulltq_int_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8))) +uint16x8_t __arm_vmulltq_int(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16))) +int32x4_t __arm_vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16))) +int32x4_t __arm_vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32))) +int64x2_t __arm_vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32))) +int64x2_t __arm_vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8))) +int16x8_t __arm_vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8))) +int16x8_t __arm_vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16))) +uint32x4_t __arm_vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16))) +uint32x4_t __arm_vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32))) +uint64x2_t __arm_vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32))) +uint64x2_t __arm_vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8))) +uint16x8_t __arm_vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8))) +uint16x8_t __arm_vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16))) +uint32x4_t __arm_vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16))) +uint32x4_t __arm_vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8))) +uint16x8_t __arm_vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8))) +uint16x8_t __arm_vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16))) +uint32x4_t __arm_vmulltq_poly_p16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16))) +uint32x4_t __arm_vmulltq_poly(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8))) +uint16x8_t __arm_vmulltq_poly_p8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8))) +uint16x8_t __arm_vmulltq_poly(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16))) +uint32x4_t __arm_vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16))) +uint32x4_t __arm_vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8))) +uint16x8_t __arm_vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8))) +uint16x8_t __arm_vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16))) +int16x8_t __arm_vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16))) +int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32))) +int32x4_t __arm_vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32))) +int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8))) +int8x16_t 
__arm_vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8))) +int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16))) +uint16x8_t __arm_vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16))) +uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32))) +uint32x4_t __arm_vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32))) +uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8))) +uint8x16_t __arm_vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8))) +uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16))) +int16x8_t __arm_vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16))) +int16x8_t __arm_vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32))) +int32x4_t __arm_vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32))) +int32x4_t __arm_vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8))) +int8x16_t __arm_vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8))) +int8x16_t __arm_vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16))) +uint16x8_t __arm_vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16))) +uint16x8_t __arm_vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32))) +uint32x4_t __arm_vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32))) +uint32x4_t __arm_vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8))) +uint8x16_t __arm_vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8))) +uint8x16_t __arm_vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16))) +int16x8_t __arm_vmulq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16))) +int16x8_t __arm_vmulq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32))) +int32x4_t __arm_vmulq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32))) +int32x4_t __arm_vmulq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8))) +int8x16_t __arm_vmulq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8))) +int8x16_t __arm_vmulq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16))) +uint16x8_t __arm_vmulq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16))) +uint16x8_t __arm_vmulq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32))) +uint32x4_t __arm_vmulq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32))) +uint32x4_t __arm_vmulq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8))) +uint8x16_t __arm_vmulq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8))) +uint8x16_t __arm_vmulq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16))) +int16x8_t __arm_vmulq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16))) +int16x8_t __arm_vmulq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32))) +int32x4_t __arm_vmulq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32))) +int32x4_t __arm_vmulq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8))) +int8x16_t __arm_vmulq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8))) +int8x16_t __arm_vmulq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16))) +uint16x8_t __arm_vmulq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16))) +uint16x8_t __arm_vmulq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32))) +uint32x4_t __arm_vmulq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32))) +uint32x4_t __arm_vmulq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8))) +uint8x16_t __arm_vmulq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8))) +uint8x16_t __arm_vmulq(uint8x16_t, 
uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16))) +int16x8_t __arm_vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16))) +int16x8_t __arm_vmulq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32))) +int32x4_t __arm_vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32))) +int32x4_t __arm_vmulq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8))) +int8x16_t __arm_vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8))) +int8x16_t __arm_vmulq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16))) +uint16x8_t __arm_vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16))) +uint16x8_t __arm_vmulq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32))) +uint32x4_t __arm_vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32))) +uint32x4_t __arm_vmulq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8))) +uint8x16_t __arm_vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8))) +uint8x16_t __arm_vmulq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16))) +int16x8_t __arm_vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16))) +int16x8_t __arm_vmulq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32))) +int32x4_t __arm_vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32))) +int32x4_t __arm_vmulq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8))) +int8x16_t __arm_vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8))) +int8x16_t __arm_vmulq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16))) +uint16x8_t __arm_vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16))) +uint16x8_t __arm_vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32))) +uint32x4_t __arm_vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32))) +uint32x4_t __arm_vmulq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8))) +uint8x16_t __arm_vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8))) +uint8x16_t __arm_vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16))) +int16x8_t __arm_vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16))) +int16x8_t __arm_vmvnq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32))) +int32x4_t __arm_vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32))) +int32x4_t __arm_vmvnq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16))) +uint16x8_t __arm_vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16))) +uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32))) +uint32x4_t __arm_vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32))) +uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16))) +int16x8_t __arm_vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16))) +int16x8_t __arm_vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32))) +int32x4_t __arm_vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32))) +int32x4_t __arm_vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8))) +int8x16_t __arm_vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8))) +int8x16_t __arm_vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16))) +uint16x8_t __arm_vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16))) +uint16x8_t __arm_vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32))) +uint32x4_t __arm_vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32))) +uint32x4_t __arm_vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8))) +uint8x16_t __arm_vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8))) +uint8x16_t __arm_vmvnq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16))) +int16x8_t __arm_vmvnq_n_s16(int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32))) +int32x4_t __arm_vmvnq_n_s32(int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16))) +uint16x8_t __arm_vmvnq_n_u16(uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32))) +uint32x4_t __arm_vmvnq_n_u32(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16))) +int16x8_t __arm_vmvnq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16))) +int16x8_t __arm_vmvnq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32))) +int32x4_t __arm_vmvnq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32))) +int32x4_t __arm_vmvnq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8))) +int8x16_t __arm_vmvnq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8))) +int8x16_t __arm_vmvnq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16))) +uint16x8_t __arm_vmvnq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16))) +uint16x8_t __arm_vmvnq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32))) +uint32x4_t __arm_vmvnq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32))) +uint32x4_t __arm_vmvnq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8))) +uint8x16_t __arm_vmvnq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8))) +uint8x16_t __arm_vmvnq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16))) +int16x8_t __arm_vmvnq_x_n_s16(int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32))) +int32x4_t __arm_vmvnq_x_n_s32(int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16))) +uint16x8_t __arm_vmvnq_x_n_u16(uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32))) +uint32x4_t __arm_vmvnq_x_n_u32(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16))) +int16x8_t __arm_vmvnq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16))) +int16x8_t __arm_vmvnq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32))) +int32x4_t __arm_vmvnq_x_s32(int32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32))) +int32x4_t __arm_vmvnq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8))) +int8x16_t __arm_vmvnq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8))) +int8x16_t __arm_vmvnq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16))) +uint16x8_t __arm_vmvnq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16))) +uint16x8_t __arm_vmvnq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32))) +uint32x4_t __arm_vmvnq_x_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32))) +uint32x4_t __arm_vmvnq_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8))) +uint8x16_t __arm_vmvnq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8))) +uint8x16_t __arm_vmvnq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16))) +int16x8_t __arm_vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16))) +int16x8_t __arm_vnegq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32))) +int32x4_t __arm_vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32))) +int32x4_t __arm_vnegq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8))) +int8x16_t __arm_vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8))) +int8x16_t __arm_vnegq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16))) +int16x8_t __arm_vnegq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16))) +int16x8_t __arm_vnegq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32))) +int32x4_t __arm_vnegq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32))) +int32x4_t __arm_vnegq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8))) +int8x16_t __arm_vnegq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8))) +int8x16_t __arm_vnegq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16))) +int16x8_t __arm_vnegq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16))) +int16x8_t __arm_vnegq_x(int16x8_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32))) +int32x4_t __arm_vnegq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32))) +int32x4_t __arm_vnegq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8))) +int8x16_t __arm_vnegq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8))) +int8x16_t __arm_vnegq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16))) +int16x8_t __arm_vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16))) +int16x8_t __arm_vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32))) +int32x4_t __arm_vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32))) +int32x4_t __arm_vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8))) +int8x16_t __arm_vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8))) +int8x16_t __arm_vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16))) +uint16x8_t __arm_vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16))) +uint16x8_t __arm_vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32))) +uint32x4_t __arm_vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32))) +uint32x4_t __arm_vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8))) +uint8x16_t __arm_vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8))) +uint8x16_t __arm_vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16))) +int16x8_t __arm_vornq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16))) +int16x8_t __arm_vornq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32))) +int32x4_t __arm_vornq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32))) +int32x4_t __arm_vornq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8))) +int8x16_t __arm_vornq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8))) +int8x16_t __arm_vornq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16))) +uint16x8_t __arm_vornq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16))) +uint16x8_t __arm_vornq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32))) +uint32x4_t __arm_vornq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32))) +uint32x4_t __arm_vornq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8))) +uint8x16_t __arm_vornq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8))) +uint8x16_t __arm_vornq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16))) +int16x8_t __arm_vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16))) +int16x8_t __arm_vornq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32))) +int32x4_t __arm_vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32))) +int32x4_t __arm_vornq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8))) +int8x16_t __arm_vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8))) +int8x16_t __arm_vornq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16))) +uint16x8_t __arm_vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16))) +uint16x8_t __arm_vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32))) +uint32x4_t __arm_vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32))) +uint32x4_t __arm_vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8))) +uint8x16_t __arm_vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8))) +uint8x16_t __arm_vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16))) +int16x8_t __arm_vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16))) +int16x8_t __arm_vorrq_m_n(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32))) +int32x4_t __arm_vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32))) +int32x4_t __arm_vorrq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16))) +uint16x8_t __arm_vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16))) +uint16x8_t __arm_vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32))) +uint32x4_t __arm_vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32))) +uint32x4_t __arm_vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16))) +int16x8_t __arm_vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16))) +int16x8_t __arm_vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32))) +int32x4_t __arm_vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32))) +int32x4_t __arm_vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8))) +int8x16_t __arm_vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8))) +int8x16_t __arm_vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16))) +uint16x8_t __arm_vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16))) +uint16x8_t __arm_vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32))) +uint32x4_t __arm_vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32))) +uint32x4_t __arm_vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8))) +uint8x16_t __arm_vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8))) +uint8x16_t __arm_vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16))) +int16x8_t __arm_vorrq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16))) +int16x8_t __arm_vorrq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32))) +int32x4_t __arm_vorrq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32))) +int32x4_t 
__arm_vorrq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16))) +uint16x8_t __arm_vorrq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16))) +uint16x8_t __arm_vorrq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32))) +uint32x4_t __arm_vorrq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32))) +uint32x4_t __arm_vorrq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16))) +int16x8_t __arm_vorrq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16))) +int16x8_t __arm_vorrq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32))) +int32x4_t __arm_vorrq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32))) +int32x4_t __arm_vorrq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8))) +int8x16_t __arm_vorrq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8))) +int8x16_t __arm_vorrq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16))) +uint16x8_t __arm_vorrq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16))) +uint16x8_t __arm_vorrq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32))) +uint32x4_t __arm_vorrq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32))) +uint32x4_t __arm_vorrq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8))) +uint8x16_t __arm_vorrq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8))) +uint8x16_t __arm_vorrq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16))) +int16x8_t __arm_vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16))) +int16x8_t __arm_vorrq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32))) +int32x4_t __arm_vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32))) +int32x4_t __arm_vorrq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8))) +int8x16_t __arm_vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8))) +int8x16_t __arm_vorrq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16))) +uint16x8_t 
__arm_vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16))) +uint16x8_t __arm_vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32))) +uint32x4_t __arm_vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32))) +uint32x4_t __arm_vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8))) +uint8x16_t __arm_vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8))) +uint8x16_t __arm_vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot))) +mve_pred16_t __arm_vpnot(mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16))) +int16x8_t __arm_vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16))) +int16x8_t __arm_vpselq(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32))) +int32x4_t __arm_vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32))) +int32x4_t __arm_vpselq(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64))) +int64x2_t __arm_vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64))) +int64x2_t __arm_vpselq(int64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8))) +int8x16_t __arm_vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8))) +int8x16_t __arm_vpselq(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16))) +uint16x8_t __arm_vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16))) +uint16x8_t __arm_vpselq(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32))) +uint32x4_t __arm_vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32))) +uint32x4_t __arm_vpselq(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64))) +uint64x2_t __arm_vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64))) +uint64x2_t __arm_vpselq(uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8))) +uint8x16_t __arm_vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8))) +uint8x16_t __arm_vpselq(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16))) +int16x8_t __arm_vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16))) +int16x8_t __arm_vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32))) +int32x4_t __arm_vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32))) +int32x4_t __arm_vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8))) +int8x16_t __arm_vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8))) +int8x16_t __arm_vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16))) +int16x8_t __arm_vqabsq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16))) +int16x8_t __arm_vqabsq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32))) +int32x4_t __arm_vqabsq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32))) +int32x4_t __arm_vqabsq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8))) +int8x16_t __arm_vqabsq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8))) +int8x16_t __arm_vqabsq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16))) +int16x8_t __arm_vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16))) +int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32))) +int32x4_t __arm_vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32))) +int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8))) +int8x16_t __arm_vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8))) +int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16))) +uint16x8_t __arm_vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16))) +uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32))) +uint32x4_t __arm_vqaddq_m_n_u32(uint32x4_t, uint32x4_t, 
uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32))) +uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8))) +uint8x16_t __arm_vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8))) +uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16))) +int16x8_t __arm_vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16))) +int16x8_t __arm_vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32))) +int32x4_t __arm_vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32))) +int32x4_t __arm_vqaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8))) +int8x16_t __arm_vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8))) +int8x16_t __arm_vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16))) +uint16x8_t __arm_vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16))) +uint16x8_t __arm_vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32))) +uint32x4_t __arm_vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32))) +uint32x4_t __arm_vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8))) +uint8x16_t __arm_vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8))) +uint8x16_t __arm_vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16))) +int16x8_t __arm_vqaddq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16))) +int16x8_t __arm_vqaddq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32))) +int32x4_t __arm_vqaddq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32))) +int32x4_t __arm_vqaddq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8))) +int8x16_t __arm_vqaddq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8))) +int8x16_t __arm_vqaddq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16))) +uint16x8_t __arm_vqaddq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16))) +uint16x8_t __arm_vqaddq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32))) +uint32x4_t __arm_vqaddq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32))) +uint32x4_t __arm_vqaddq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8))) +uint8x16_t __arm_vqaddq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8))) +uint8x16_t __arm_vqaddq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16))) +int16x8_t __arm_vqaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16))) +int16x8_t __arm_vqaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32))) +int32x4_t __arm_vqaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32))) +int32x4_t __arm_vqaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8))) +int8x16_t __arm_vqaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8))) +int8x16_t __arm_vqaddq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16))) +uint16x8_t __arm_vqaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16))) +uint16x8_t __arm_vqaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32))) +uint32x4_t __arm_vqaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32))) +uint32x4_t __arm_vqaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8))) +uint8x16_t __arm_vqaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8))) +uint8x16_t __arm_vqaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16))) +int16x8_t __arm_vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16))) +int16x8_t __arm_vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32))) +int32x4_t __arm_vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32))) +int32x4_t __arm_vqdmladhq_m(int32x4_t, int32x4_t, 
int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8))) +int8x16_t __arm_vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8))) +int8x16_t __arm_vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16))) +int16x8_t __arm_vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16))) +int16x8_t __arm_vqdmladhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32))) +int32x4_t __arm_vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32))) +int32x4_t __arm_vqdmladhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8))) +int8x16_t __arm_vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8))) +int8x16_t __arm_vqdmladhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16))) +int16x8_t __arm_vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16))) +int16x8_t __arm_vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32))) +int32x4_t __arm_vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32))) +int32x4_t __arm_vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8))) +int8x16_t __arm_vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8))) +int8x16_t __arm_vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16))) +int16x8_t __arm_vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16))) +int16x8_t __arm_vqdmladhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32))) +int32x4_t __arm_vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32))) +int32x4_t __arm_vqdmladhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8))) +int8x16_t __arm_vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8))) +int8x16_t __arm_vqdmladhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16))) +int16x8_t __arm_vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16))) +int16x8_t __arm_vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32))) +int32x4_t __arm_vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32))) +int32x4_t __arm_vqdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8))) +int8x16_t __arm_vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8))) +int8x16_t __arm_vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16))) +int16x8_t __arm_vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16))) +int16x8_t __arm_vqdmlahq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32))) +int32x4_t __arm_vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32))) +int32x4_t __arm_vqdmlahq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8))) +int8x16_t __arm_vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8))) +int8x16_t __arm_vqdmlahq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16))) +int16x8_t __arm_vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16))) +int16x8_t __arm_vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32))) +int32x4_t __arm_vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32))) +int32x4_t __arm_vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8))) +int8x16_t __arm_vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8))) +int8x16_t __arm_vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16))) +int16x8_t __arm_vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16))) +int16x8_t __arm_vqdmlashq(int16x8_t, int16x8_t, int16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32))) +int32x4_t __arm_vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32))) +int32x4_t __arm_vqdmlashq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8))) +int8x16_t __arm_vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8))) +int8x16_t __arm_vqdmlashq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16))) +int16x8_t __arm_vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16))) +int16x8_t __arm_vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32))) +int32x4_t __arm_vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32))) +int32x4_t __arm_vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8))) +int8x16_t __arm_vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8))) +int8x16_t __arm_vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16))) +int16x8_t __arm_vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16))) +int16x8_t __arm_vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32))) +int32x4_t __arm_vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32))) +int32x4_t __arm_vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8))) +int8x16_t __arm_vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8))) +int8x16_t __arm_vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16))) +int16x8_t __arm_vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16))) +int16x8_t __arm_vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32))) +int32x4_t __arm_vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32))) +int32x4_t __arm_vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8))) +int8x16_t __arm_vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8))) +int8x16_t __arm_vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16))) +int16x8_t __arm_vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16))) +int16x8_t __arm_vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32))) +int32x4_t __arm_vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32))) +int32x4_t __arm_vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8))) +int8x16_t __arm_vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8))) +int8x16_t __arm_vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16))) +int16x8_t __arm_vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16))) +int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32))) +int32x4_t __arm_vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32))) +int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8))) +int8x16_t __arm_vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8))) +int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16))) +int16x8_t __arm_vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16))) +int16x8_t __arm_vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32))) +int32x4_t __arm_vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32))) +int32x4_t __arm_vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8))) +int8x16_t __arm_vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8))) +int8x16_t __arm_vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16))) +int16x8_t __arm_vqdmulhq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16))) +int16x8_t __arm_vqdmulhq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32))) +int32x4_t __arm_vqdmulhq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32))) +int32x4_t __arm_vqdmulhq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8))) +int8x16_t __arm_vqdmulhq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8))) +int8x16_t __arm_vqdmulhq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16))) +int16x8_t __arm_vqdmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16))) +int16x8_t __arm_vqdmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32))) +int32x4_t __arm_vqdmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32))) +int32x4_t __arm_vqdmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8))) +int8x16_t __arm_vqdmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8))) +int8x16_t __arm_vqdmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16))) +int32x4_t __arm_vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16))) +int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32))) +int64x2_t __arm_vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32))) +int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16))) +int32x4_t __arm_vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16))) +int32x4_t __arm_vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32))) +int64x2_t __arm_vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32))) +int64x2_t __arm_vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16))) +int32x4_t __arm_vqdmullbq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16))) +int32x4_t __arm_vqdmullbq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32))) +int64x2_t __arm_vqdmullbq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32))) +int64x2_t __arm_vqdmullbq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16))) +int32x4_t __arm_vqdmullbq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16))) +int32x4_t __arm_vqdmullbq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32))) +int64x2_t __arm_vqdmullbq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32))) +int64x2_t __arm_vqdmullbq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16))) +int32x4_t __arm_vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16))) +int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32))) +int64x2_t __arm_vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32))) +int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16))) +int32x4_t __arm_vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16))) +int32x4_t __arm_vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32))) +int64x2_t __arm_vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32))) +int64x2_t __arm_vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16))) +int32x4_t __arm_vqdmulltq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16))) +int32x4_t __arm_vqdmulltq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32))) +int64x2_t __arm_vqdmulltq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32))) +int64x2_t __arm_vqdmulltq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16))) +int32x4_t __arm_vqdmulltq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16))) +int32x4_t __arm_vqdmulltq(int16x8_t, int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32))) +int64x2_t __arm_vqdmulltq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32))) +int64x2_t __arm_vqdmulltq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16))) +int8x16_t __arm_vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16))) +int8x16_t __arm_vqmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32))) +int16x8_t __arm_vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32))) +int16x8_t __arm_vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16))) +uint8x16_t __arm_vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16))) +uint8x16_t __arm_vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32))) +uint16x8_t __arm_vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32))) +uint16x8_t __arm_vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16))) +int8x16_t __arm_vqmovnbq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16))) +int8x16_t __arm_vqmovnbq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32))) +int16x8_t __arm_vqmovnbq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32))) +int16x8_t __arm_vqmovnbq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16))) +uint8x16_t __arm_vqmovnbq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16))) +uint8x16_t __arm_vqmovnbq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32))) +uint16x8_t __arm_vqmovnbq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32))) +uint16x8_t __arm_vqmovnbq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16))) +int8x16_t __arm_vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16))) +int8x16_t __arm_vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32))) +int16x8_t __arm_vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32))) 
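Not part of the patched header, for context only: the declarations in this hunk follow the ACLE MVE convention of declaring each intrinsic twice, once with an explicit element-type suffix (e.g. __arm_vqaddq_s16) and once as an __overloadable__ alias (__arm_vqaddq) bound to the same Clang builtin via __clang_arm_builtin_alias, plus predicated _m variants that take an mve_pred16_t mask and an "inactive" vector for masked-off lanes. A minimal usage sketch, assuming a Cortex-M target compiled with MVE enabled (e.g. -mcpu=cortex-m55) so that arm_mve.h is usable:

    #include <arm_mve.h>

    /* Illustrative only: saturating vector add spelled three ways,
       using declarations that appear in this hunk. */
    int16x8_t demo(int16x8_t a, int16x8_t b, mve_pred16_t p)
    {
        int16x8_t r1 = __arm_vqaddq_s16(a, b);     /* explicit type suffix     */
        int16x8_t r2 = __arm_vqaddq(a, b);         /* overloaded alias         */
        int16x8_t r3 = __arm_vqaddq_m(a, a, b, p); /* predicated: masked-off
                                                      lanes take the first
                                                      ("inactive") argument    */
        return __arm_vqaddq(__arm_vpselq(r1, r2, p), r3); /* per-lane select   */
    }

The _n forms (e.g. __arm_vqaddq_n_s16) take a scalar second operand that is broadcast across lanes, and the _x forms are predicated variants whose masked-off lanes are left unspecified rather than drawn from an inactive vector.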
+int16x8_t __arm_vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16))) +uint8x16_t __arm_vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16))) +uint8x16_t __arm_vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32))) +uint16x8_t __arm_vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32))) +uint16x8_t __arm_vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16))) +int8x16_t __arm_vqmovntq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16))) +int8x16_t __arm_vqmovntq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32))) +int16x8_t __arm_vqmovntq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32))) +int16x8_t __arm_vqmovntq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16))) +uint8x16_t __arm_vqmovntq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16))) +uint8x16_t __arm_vqmovntq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32))) +uint16x8_t __arm_vqmovntq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32))) +uint16x8_t __arm_vqmovntq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16))) +uint8x16_t __arm_vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16))) +uint8x16_t __arm_vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32))) +uint16x8_t __arm_vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32))) +uint16x8_t __arm_vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16))) +uint8x16_t __arm_vqmovunbq_s16(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16))) +uint8x16_t __arm_vqmovunbq(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32))) +uint16x8_t __arm_vqmovunbq_s32(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32))) +uint16x8_t __arm_vqmovunbq(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16))) +uint8x16_t __arm_vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16))) +uint8x16_t __arm_vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32))) +uint16x8_t __arm_vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32))) +uint16x8_t __arm_vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16))) +uint8x16_t __arm_vqmovuntq_s16(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16))) +uint8x16_t __arm_vqmovuntq(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32))) +uint16x8_t __arm_vqmovuntq_s32(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32))) +uint16x8_t __arm_vqmovuntq(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16))) +int16x8_t __arm_vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16))) +int16x8_t __arm_vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32))) +int32x4_t __arm_vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32))) +int32x4_t __arm_vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8))) +int8x16_t __arm_vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8))) +int8x16_t __arm_vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16))) +int16x8_t __arm_vqnegq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16))) +int16x8_t __arm_vqnegq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32))) +int32x4_t __arm_vqnegq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32))) +int32x4_t __arm_vqnegq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8))) +int8x16_t __arm_vqnegq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8))) +int8x16_t __arm_vqnegq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16))) +int16x8_t __arm_vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16))) +int16x8_t __arm_vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32))) +int32x4_t __arm_vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32))) +int32x4_t __arm_vqrdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8))) +int8x16_t __arm_vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8))) +int8x16_t __arm_vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16))) +int16x8_t __arm_vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16))) +int16x8_t __arm_vqrdmladhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32))) +int32x4_t __arm_vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32))) +int32x4_t __arm_vqrdmladhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8))) +int8x16_t __arm_vqrdmladhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8))) +int8x16_t __arm_vqrdmladhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16))) +int16x8_t __arm_vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16))) +int16x8_t __arm_vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32))) +int32x4_t __arm_vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32))) +int32x4_t __arm_vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8))) +int8x16_t __arm_vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8))) +int8x16_t __arm_vqrdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16))) +int16x8_t __arm_vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16))) +int16x8_t __arm_vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32))) +int32x4_t __arm_vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32))) +int32x4_t __arm_vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8))) +int8x16_t __arm_vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8))) +int8x16_t __arm_vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16))) +int16x8_t __arm_vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16))) +int16x8_t __arm_vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32))) +int32x4_t __arm_vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32))) +int32x4_t __arm_vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8))) +int8x16_t __arm_vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8))) +int8x16_t __arm_vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16))) +int16x8_t __arm_vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16))) +int16x8_t __arm_vqrdmlahq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32))) +int32x4_t __arm_vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32))) +int32x4_t __arm_vqrdmlahq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8))) +int8x16_t __arm_vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8))) +int8x16_t __arm_vqrdmlahq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16))) +int16x8_t __arm_vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16))) +int16x8_t __arm_vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32))) +int32x4_t __arm_vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32))) +int32x4_t __arm_vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8))) +int8x16_t __arm_vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8))) +int8x16_t __arm_vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16))) +int16x8_t __arm_vqrdmlashq_n_s16(int16x8_t, 
int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16))) +int16x8_t __arm_vqrdmlashq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32))) +int32x4_t __arm_vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32))) +int32x4_t __arm_vqrdmlashq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8))) +int8x16_t __arm_vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8))) +int8x16_t __arm_vqrdmlashq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16))) +int16x8_t __arm_vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16))) +int16x8_t __arm_vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32))) +int32x4_t __arm_vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32))) +int32x4_t __arm_vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8))) +int8x16_t __arm_vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8))) +int8x16_t __arm_vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16))) +int16x8_t __arm_vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16))) +int16x8_t __arm_vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32))) +int32x4_t __arm_vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32))) +int32x4_t __arm_vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8))) +int8x16_t __arm_vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8))) +int8x16_t __arm_vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16))) +int16x8_t __arm_vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16))) +int16x8_t __arm_vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32))) +int32x4_t __arm_vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32))) +int32x4_t __arm_vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8))) +int8x16_t __arm_vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8))) +int8x16_t __arm_vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16))) +int16x8_t __arm_vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16))) +int16x8_t __arm_vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32))) +int32x4_t __arm_vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32))) +int32x4_t __arm_vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8))) +int8x16_t __arm_vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8))) +int8x16_t __arm_vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16))) +int16x8_t __arm_vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16))) +int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32))) +int32x4_t __arm_vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32))) +int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8))) +int8x16_t __arm_vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8))) +int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16))) +int16x8_t __arm_vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16))) +int16x8_t __arm_vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32))) +int32x4_t __arm_vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32))) +int32x4_t __arm_vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8))) +int8x16_t __arm_vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8))) +int8x16_t __arm_vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16))) +int16x8_t __arm_vqrdmulhq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16))) +int16x8_t __arm_vqrdmulhq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32))) +int32x4_t __arm_vqrdmulhq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32))) +int32x4_t __arm_vqrdmulhq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8))) +int8x16_t __arm_vqrdmulhq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8))) +int8x16_t __arm_vqrdmulhq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16))) +int16x8_t __arm_vqrdmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16))) +int16x8_t __arm_vqrdmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32))) +int32x4_t __arm_vqrdmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32))) +int32x4_t __arm_vqrdmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8))) +int8x16_t __arm_vqrdmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8))) +int8x16_t __arm_vqrdmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16))) +int16x8_t __arm_vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16))) +int16x8_t __arm_vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32))) +int32x4_t __arm_vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32))) +int32x4_t __arm_vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8))) +int8x16_t __arm_vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8))) +int8x16_t __arm_vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16))) +uint16x8_t __arm_vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16))) +uint16x8_t 
__arm_vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32))) +uint32x4_t __arm_vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32))) +uint32x4_t __arm_vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8))) +uint8x16_t __arm_vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8))) +uint8x16_t __arm_vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16))) +int16x8_t __arm_vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16))) +int16x8_t __arm_vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32))) +int32x4_t __arm_vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32))) +int32x4_t __arm_vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8))) +int8x16_t __arm_vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8))) +int8x16_t __arm_vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16))) +uint16x8_t __arm_vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16))) +uint16x8_t __arm_vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32))) +uint32x4_t __arm_vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32))) +uint32x4_t __arm_vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8))) +uint8x16_t __arm_vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8))) +uint8x16_t __arm_vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16))) +int16x8_t __arm_vqrshlq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16))) +int16x8_t __arm_vqrshlq(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32))) +int32x4_t __arm_vqrshlq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32))) +int32x4_t __arm_vqrshlq(int32x4_t, int32_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8))) +int8x16_t __arm_vqrshlq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8))) +int8x16_t __arm_vqrshlq(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16))) +uint16x8_t __arm_vqrshlq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16))) +uint16x8_t __arm_vqrshlq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32))) +uint32x4_t __arm_vqrshlq_n_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32))) +uint32x4_t __arm_vqrshlq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8))) +uint8x16_t __arm_vqrshlq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8))) +uint8x16_t __arm_vqrshlq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16))) +int16x8_t __arm_vqrshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16))) +int16x8_t __arm_vqrshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32))) +int32x4_t __arm_vqrshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32))) +int32x4_t __arm_vqrshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8))) +int8x16_t __arm_vqrshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8))) +int8x16_t __arm_vqrshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16))) +uint16x8_t __arm_vqrshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16))) +uint16x8_t __arm_vqrshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32))) +uint32x4_t __arm_vqrshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32))) +uint32x4_t __arm_vqrshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8))) +uint8x16_t __arm_vqrshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8))) +uint8x16_t __arm_vqrshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16))) +int8x16_t __arm_vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16))) +int8x16_t __arm_vqrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32))) +int16x8_t 
__arm_vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32))) +int16x8_t __arm_vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16))) +uint8x16_t __arm_vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16))) +uint8x16_t __arm_vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32))) +uint16x8_t __arm_vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32))) +uint16x8_t __arm_vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16))) +int8x16_t __arm_vqrshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16))) +int8x16_t __arm_vqrshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32))) +int16x8_t __arm_vqrshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32))) +int16x8_t __arm_vqrshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16))) +uint8x16_t __arm_vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16))) +uint8x16_t __arm_vqrshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32))) +uint16x8_t __arm_vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32))) +uint16x8_t __arm_vqrshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16))) +int8x16_t __arm_vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16))) +int8x16_t __arm_vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32))) +int16x8_t __arm_vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32))) +int16x8_t __arm_vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16))) +uint8x16_t __arm_vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16))) +uint8x16_t __arm_vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32))) +uint16x8_t 
__arm_vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32))) +uint16x8_t __arm_vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16))) +int8x16_t __arm_vqrshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16))) +int8x16_t __arm_vqrshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32))) +int16x8_t __arm_vqrshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32))) +int16x8_t __arm_vqrshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16))) +uint8x16_t __arm_vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16))) +uint8x16_t __arm_vqrshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32))) +uint16x8_t __arm_vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32))) +uint16x8_t __arm_vqrshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16))) +uint8x16_t __arm_vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16))) +uint8x16_t __arm_vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32))) +uint16x8_t __arm_vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32))) +uint16x8_t __arm_vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16))) +uint8x16_t __arm_vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16))) +uint8x16_t __arm_vqrshrunbq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32))) +uint16x8_t __arm_vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32))) +uint16x8_t __arm_vqrshrunbq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16))) +uint8x16_t __arm_vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16))) +uint8x16_t __arm_vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32))) +uint16x8_t __arm_vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32))) +uint16x8_t __arm_vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16))) +uint8x16_t __arm_vqrshruntq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16))) +uint8x16_t __arm_vqrshruntq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32))) +uint16x8_t __arm_vqrshruntq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32))) +uint16x8_t __arm_vqrshruntq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16))) +int16x8_t __arm_vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16))) +int16x8_t __arm_vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32))) +int32x4_t __arm_vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32))) +int32x4_t __arm_vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8))) +int8x16_t __arm_vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8))) +int8x16_t __arm_vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16))) +uint16x8_t __arm_vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16))) +uint16x8_t __arm_vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32))) +uint32x4_t __arm_vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32))) +uint32x4_t __arm_vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8))) +uint8x16_t __arm_vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8))) +uint8x16_t __arm_vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16))) +int16x8_t __arm_vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16))) +int16x8_t __arm_vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32))) +int32x4_t __arm_vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32))) +int32x4_t __arm_vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8))) +int8x16_t __arm_vqshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8))) +int8x16_t __arm_vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16))) +uint16x8_t __arm_vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16))) +uint16x8_t __arm_vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32))) +uint32x4_t __arm_vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32))) +uint32x4_t __arm_vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8))) +uint8x16_t __arm_vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8))) +uint8x16_t __arm_vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16))) +int16x8_t __arm_vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16))) +int16x8_t __arm_vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32))) +int32x4_t __arm_vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32))) +int32x4_t __arm_vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8))) +int8x16_t __arm_vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8))) +int8x16_t __arm_vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16))) +uint16x8_t __arm_vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16))) +uint16x8_t __arm_vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32))) +uint32x4_t __arm_vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32))) +uint32x4_t __arm_vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8))) +uint8x16_t __arm_vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8))) +uint8x16_t __arm_vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16))) +int16x8_t __arm_vqshlq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16))) +int16x8_t __arm_vqshlq_n(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32))) +int32x4_t __arm_vqshlq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32))) +int32x4_t __arm_vqshlq_n(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8))) +int8x16_t __arm_vqshlq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8))) +int8x16_t __arm_vqshlq_n(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16))) +uint16x8_t __arm_vqshlq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16))) +uint16x8_t __arm_vqshlq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32))) +uint32x4_t __arm_vqshlq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32))) +uint32x4_t __arm_vqshlq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8))) +uint8x16_t __arm_vqshlq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8))) +uint8x16_t __arm_vqshlq_n(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16))) +int16x8_t __arm_vqshlq_r_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16))) +int16x8_t __arm_vqshlq_r(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32))) +int32x4_t __arm_vqshlq_r_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32))) +int32x4_t __arm_vqshlq_r(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8))) +int8x16_t __arm_vqshlq_r_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8))) +int8x16_t __arm_vqshlq_r(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16))) +uint16x8_t __arm_vqshlq_r_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16))) +uint16x8_t __arm_vqshlq_r(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32))) +uint32x4_t __arm_vqshlq_r_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32))) +uint32x4_t __arm_vqshlq_r(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8))) +uint8x16_t 
__arm_vqshlq_r_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8))) +uint8x16_t __arm_vqshlq_r(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16))) +int16x8_t __arm_vqshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16))) +int16x8_t __arm_vqshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32))) +int32x4_t __arm_vqshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32))) +int32x4_t __arm_vqshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8))) +int8x16_t __arm_vqshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8))) +int8x16_t __arm_vqshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16))) +uint16x8_t __arm_vqshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16))) +uint16x8_t __arm_vqshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32))) +uint32x4_t __arm_vqshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32))) +uint32x4_t __arm_vqshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8))) +uint8x16_t __arm_vqshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8))) +uint8x16_t __arm_vqshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16))) +uint16x8_t __arm_vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16))) +uint16x8_t __arm_vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32))) +uint32x4_t __arm_vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32))) +uint32x4_t __arm_vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8))) +uint8x16_t __arm_vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8))) +uint8x16_t __arm_vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16))) +uint16x8_t __arm_vqshluq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16))) +uint16x8_t __arm_vqshluq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32))) +uint32x4_t __arm_vqshluq_n_s32(int32x4_t, int); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32))) +uint32x4_t __arm_vqshluq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8))) +uint8x16_t __arm_vqshluq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8))) +uint8x16_t __arm_vqshluq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16))) +int8x16_t __arm_vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16))) +int8x16_t __arm_vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32))) +int16x8_t __arm_vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32))) +int16x8_t __arm_vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16))) +uint8x16_t __arm_vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16))) +uint8x16_t __arm_vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32))) +uint16x8_t __arm_vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32))) +uint16x8_t __arm_vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16))) +int8x16_t __arm_vqshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16))) +int8x16_t __arm_vqshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32))) +int16x8_t __arm_vqshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32))) +int16x8_t __arm_vqshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16))) +uint8x16_t __arm_vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16))) +uint8x16_t __arm_vqshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32))) +uint16x8_t __arm_vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32))) +uint16x8_t __arm_vqshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16))) +int8x16_t __arm_vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16))) +int8x16_t __arm_vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32))) +int16x8_t __arm_vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32))) +int16x8_t __arm_vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16))) +uint8x16_t __arm_vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16))) +uint8x16_t __arm_vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32))) +uint16x8_t __arm_vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32))) +uint16x8_t __arm_vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16))) +int8x16_t __arm_vqshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16))) +int8x16_t __arm_vqshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32))) +int16x8_t __arm_vqshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32))) +int16x8_t __arm_vqshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16))) +uint8x16_t __arm_vqshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16))) +uint8x16_t __arm_vqshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32))) +uint16x8_t __arm_vqshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32))) +uint16x8_t __arm_vqshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16))) +uint8x16_t __arm_vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16))) +uint8x16_t __arm_vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32))) +uint16x8_t __arm_vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32))) +uint16x8_t __arm_vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16))) +uint8x16_t __arm_vqshrunbq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16))) +uint8x16_t __arm_vqshrunbq(uint8x16_t, int16x8_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32))) +uint16x8_t __arm_vqshrunbq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32))) +uint16x8_t __arm_vqshrunbq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16))) +uint8x16_t __arm_vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16))) +uint8x16_t __arm_vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32))) +uint16x8_t __arm_vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32))) +uint16x8_t __arm_vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16))) +uint8x16_t __arm_vqshruntq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16))) +uint8x16_t __arm_vqshruntq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32))) +uint16x8_t __arm_vqshruntq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32))) +uint16x8_t __arm_vqshruntq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16))) +int16x8_t __arm_vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16))) +int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32))) +int32x4_t __arm_vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32))) +int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8))) +int8x16_t __arm_vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8))) +int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16))) +uint16x8_t __arm_vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16))) +uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32))) +uint32x4_t __arm_vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32))) +uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8))) +uint8x16_t __arm_vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8))) +uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16))) +int16x8_t __arm_vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16))) +int16x8_t __arm_vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32))) +int32x4_t __arm_vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32))) +int32x4_t __arm_vqsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8))) +int8x16_t __arm_vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8))) +int8x16_t __arm_vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16))) +uint16x8_t __arm_vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16))) +uint16x8_t __arm_vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32))) +uint32x4_t __arm_vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32))) +uint32x4_t __arm_vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8))) +uint8x16_t __arm_vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8))) +uint8x16_t __arm_vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16))) +int16x8_t __arm_vqsubq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16))) +int16x8_t __arm_vqsubq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32))) +int32x4_t __arm_vqsubq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32))) +int32x4_t __arm_vqsubq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8))) +int8x16_t __arm_vqsubq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8))) +int8x16_t __arm_vqsubq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16))) +uint16x8_t __arm_vqsubq_n_u16(uint16x8_t, 
uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16))) +uint16x8_t __arm_vqsubq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32))) +uint32x4_t __arm_vqsubq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32))) +uint32x4_t __arm_vqsubq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8))) +uint8x16_t __arm_vqsubq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8))) +uint8x16_t __arm_vqsubq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16))) +int16x8_t __arm_vqsubq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16))) +int16x8_t __arm_vqsubq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32))) +int32x4_t __arm_vqsubq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32))) +int32x4_t __arm_vqsubq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8))) +int8x16_t __arm_vqsubq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8))) +int8x16_t __arm_vqsubq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16))) +uint16x8_t __arm_vqsubq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16))) +uint16x8_t __arm_vqsubq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32))) +uint32x4_t __arm_vqsubq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32))) +uint32x4_t __arm_vqsubq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8))) +uint8x16_t __arm_vqsubq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8))) +uint8x16_t __arm_vqsubq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32))) +int16x8_t __arm_vreinterpretq_s16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32))) +int16x8_t __arm_vreinterpretq_s16(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64))) +int16x8_t __arm_vreinterpretq_s16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64))) +int16x8_t __arm_vreinterpretq_s16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8))) +int16x8_t __arm_vreinterpretq_s16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8))) +int16x8_t 
__arm_vreinterpretq_s16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16))) +int16x8_t __arm_vreinterpretq_s16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16))) +int16x8_t __arm_vreinterpretq_s16(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32))) +int16x8_t __arm_vreinterpretq_s16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32))) +int16x8_t __arm_vreinterpretq_s16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64))) +int16x8_t __arm_vreinterpretq_s16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64))) +int16x8_t __arm_vreinterpretq_s16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8))) +int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8))) +int16x8_t __arm_vreinterpretq_s16(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16))) +int32x4_t __arm_vreinterpretq_s32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16))) +int32x4_t __arm_vreinterpretq_s32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64))) +int32x4_t __arm_vreinterpretq_s32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64))) +int32x4_t __arm_vreinterpretq_s32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8))) +int32x4_t __arm_vreinterpretq_s32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8))) +int32x4_t __arm_vreinterpretq_s32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16))) +int32x4_t __arm_vreinterpretq_s32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16))) +int32x4_t __arm_vreinterpretq_s32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32))) +int32x4_t __arm_vreinterpretq_s32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32))) +int32x4_t __arm_vreinterpretq_s32(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64))) +int32x4_t __arm_vreinterpretq_s32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64))) +int32x4_t __arm_vreinterpretq_s32(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8))) +int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8))) +int32x4_t 
__arm_vreinterpretq_s32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16))) +int64x2_t __arm_vreinterpretq_s64_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16))) +int64x2_t __arm_vreinterpretq_s64(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32))) +int64x2_t __arm_vreinterpretq_s64_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32))) +int64x2_t __arm_vreinterpretq_s64(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8))) +int64x2_t __arm_vreinterpretq_s64_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8))) +int64x2_t __arm_vreinterpretq_s64(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16))) +int64x2_t __arm_vreinterpretq_s64_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16))) +int64x2_t __arm_vreinterpretq_s64(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32))) +int64x2_t __arm_vreinterpretq_s64_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32))) +int64x2_t __arm_vreinterpretq_s64(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64))) +int64x2_t __arm_vreinterpretq_s64_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64))) +int64x2_t __arm_vreinterpretq_s64(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8))) +int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8))) +int64x2_t __arm_vreinterpretq_s64(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16))) +int8x16_t __arm_vreinterpretq_s8_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16))) +int8x16_t __arm_vreinterpretq_s8(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32))) +int8x16_t __arm_vreinterpretq_s8_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32))) +int8x16_t __arm_vreinterpretq_s8(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64))) +int8x16_t __arm_vreinterpretq_s8_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64))) +int8x16_t __arm_vreinterpretq_s8(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16))) +int8x16_t __arm_vreinterpretq_s8_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16))) +int8x16_t 
__arm_vreinterpretq_s8(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32))) +int8x16_t __arm_vreinterpretq_s8_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32))) +int8x16_t __arm_vreinterpretq_s8(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64))) +int8x16_t __arm_vreinterpretq_s8_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64))) +int8x16_t __arm_vreinterpretq_s8(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8))) +int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8))) +int8x16_t __arm_vreinterpretq_s8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16))) +uint16x8_t __arm_vreinterpretq_u16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16))) +uint16x8_t __arm_vreinterpretq_u16(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32))) +uint16x8_t __arm_vreinterpretq_u16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32))) +uint16x8_t __arm_vreinterpretq_u16(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64))) +uint16x8_t __arm_vreinterpretq_u16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64))) +uint16x8_t __arm_vreinterpretq_u16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8))) +uint16x8_t __arm_vreinterpretq_u16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8))) +uint16x8_t __arm_vreinterpretq_u16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32))) +uint16x8_t __arm_vreinterpretq_u16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32))) +uint16x8_t __arm_vreinterpretq_u16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64))) +uint16x8_t __arm_vreinterpretq_u16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64))) +uint16x8_t __arm_vreinterpretq_u16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8))) +uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8))) +uint16x8_t __arm_vreinterpretq_u16(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16))) +uint32x4_t __arm_vreinterpretq_u32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16))) +uint32x4_t 
__arm_vreinterpretq_u32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32))) +uint32x4_t __arm_vreinterpretq_u32_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32))) +uint32x4_t __arm_vreinterpretq_u32(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64))) +uint32x4_t __arm_vreinterpretq_u32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64))) +uint32x4_t __arm_vreinterpretq_u32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8))) +uint32x4_t __arm_vreinterpretq_u32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8))) +uint32x4_t __arm_vreinterpretq_u32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16))) +uint32x4_t __arm_vreinterpretq_u32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16))) +uint32x4_t __arm_vreinterpretq_u32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64))) +uint32x4_t __arm_vreinterpretq_u32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64))) +uint32x4_t __arm_vreinterpretq_u32(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8))) +uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8))) +uint32x4_t __arm_vreinterpretq_u32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16))) +uint64x2_t __arm_vreinterpretq_u64_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16))) +uint64x2_t __arm_vreinterpretq_u64(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32))) +uint64x2_t __arm_vreinterpretq_u64_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32))) +uint64x2_t __arm_vreinterpretq_u64(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64))) +uint64x2_t __arm_vreinterpretq_u64_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64))) +uint64x2_t __arm_vreinterpretq_u64(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8))) +uint64x2_t __arm_vreinterpretq_u64_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8))) +uint64x2_t __arm_vreinterpretq_u64(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16))) +uint64x2_t __arm_vreinterpretq_u64_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16))) 
+uint64x2_t __arm_vreinterpretq_u64(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32))) +uint64x2_t __arm_vreinterpretq_u64_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32))) +uint64x2_t __arm_vreinterpretq_u64(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8))) +uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8))) +uint64x2_t __arm_vreinterpretq_u64(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16))) +uint8x16_t __arm_vreinterpretq_u8_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16))) +uint8x16_t __arm_vreinterpretq_u8(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32))) +uint8x16_t __arm_vreinterpretq_u8_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32))) +uint8x16_t __arm_vreinterpretq_u8(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64))) +uint8x16_t __arm_vreinterpretq_u8_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64))) +uint8x16_t __arm_vreinterpretq_u8(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8))) +uint8x16_t __arm_vreinterpretq_u8_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8))) +uint8x16_t __arm_vreinterpretq_u8(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16))) +uint8x16_t __arm_vreinterpretq_u8_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16))) +uint8x16_t __arm_vreinterpretq_u8(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32))) +uint8x16_t __arm_vreinterpretq_u8_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32))) +uint8x16_t __arm_vreinterpretq_u8(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64))) +uint8x16_t __arm_vreinterpretq_u8_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64))) +uint8x16_t __arm_vreinterpretq_u8(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8))) +int8x16_t __arm_vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8))) +int8x16_t __arm_vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8))) +uint8x16_t __arm_vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8))) 
+uint8x16_t __arm_vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8))) +int8x16_t __arm_vrev16q_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8))) +int8x16_t __arm_vrev16q(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8))) +uint8x16_t __arm_vrev16q_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8))) +uint8x16_t __arm_vrev16q(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8))) +int8x16_t __arm_vrev16q_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8))) +int8x16_t __arm_vrev16q_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8))) +uint8x16_t __arm_vrev16q_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8))) +uint8x16_t __arm_vrev16q_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16))) +int16x8_t __arm_vrev32q_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16))) +int16x8_t __arm_vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8))) +int8x16_t __arm_vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8))) +int8x16_t __arm_vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16))) +uint16x8_t __arm_vrev32q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16))) +uint16x8_t __arm_vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8))) +uint8x16_t __arm_vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8))) +uint8x16_t __arm_vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16))) +int16x8_t __arm_vrev32q_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16))) +int16x8_t __arm_vrev32q(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8))) +int8x16_t __arm_vrev32q_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8))) +int8x16_t __arm_vrev32q(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16))) +uint16x8_t __arm_vrev32q_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16))) +uint16x8_t __arm_vrev32q(uint16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8))) +uint8x16_t __arm_vrev32q_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8))) +uint8x16_t __arm_vrev32q(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16))) +int16x8_t __arm_vrev32q_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16))) +int16x8_t __arm_vrev32q_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8))) +int8x16_t __arm_vrev32q_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8))) +int8x16_t __arm_vrev32q_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16))) +uint16x8_t __arm_vrev32q_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16))) +uint16x8_t __arm_vrev32q_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8))) +uint8x16_t __arm_vrev32q_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8))) +uint8x16_t __arm_vrev32q_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16))) +int16x8_t __arm_vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16))) +int16x8_t __arm_vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32))) +int32x4_t __arm_vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32))) +int32x4_t __arm_vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8))) +int8x16_t __arm_vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8))) +int8x16_t __arm_vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16))) +uint16x8_t __arm_vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16))) +uint16x8_t __arm_vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32))) +uint32x4_t __arm_vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32))) +uint32x4_t __arm_vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8))) +uint8x16_t __arm_vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8))) +uint8x16_t 
__arm_vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16))) +int16x8_t __arm_vrev64q_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16))) +int16x8_t __arm_vrev64q(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32))) +int32x4_t __arm_vrev64q_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32))) +int32x4_t __arm_vrev64q(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8))) +int8x16_t __arm_vrev64q_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8))) +int8x16_t __arm_vrev64q(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16))) +uint16x8_t __arm_vrev64q_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16))) +uint16x8_t __arm_vrev64q(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32))) +uint32x4_t __arm_vrev64q_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32))) +uint32x4_t __arm_vrev64q(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8))) +uint8x16_t __arm_vrev64q_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8))) +uint8x16_t __arm_vrev64q(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16))) +int16x8_t __arm_vrev64q_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16))) +int16x8_t __arm_vrev64q_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32))) +int32x4_t __arm_vrev64q_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32))) +int32x4_t __arm_vrev64q_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8))) +int8x16_t __arm_vrev64q_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8))) +int8x16_t __arm_vrev64q_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16))) +uint16x8_t __arm_vrev64q_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16))) +uint16x8_t __arm_vrev64q_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32))) +uint32x4_t __arm_vrev64q_x_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32))) +uint32x4_t __arm_vrev64q_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8))) +uint8x16_t __arm_vrev64q_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8))) +uint8x16_t __arm_vrev64q_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16))) +int16x8_t __arm_vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16))) +int16x8_t __arm_vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32))) +int32x4_t __arm_vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32))) +int32x4_t __arm_vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8))) +int8x16_t __arm_vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8))) +int8x16_t __arm_vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16))) +uint16x8_t __arm_vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16))) +uint16x8_t __arm_vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32))) +uint32x4_t __arm_vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32))) +uint32x4_t __arm_vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8))) +uint8x16_t __arm_vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8))) +uint8x16_t __arm_vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16))) +int16x8_t __arm_vrhaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16))) +int16x8_t __arm_vrhaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32))) +int32x4_t __arm_vrhaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32))) +int32x4_t __arm_vrhaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8))) +int8x16_t __arm_vrhaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8))) +int8x16_t __arm_vrhaddq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16))) +uint16x8_t __arm_vrhaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16))) +uint16x8_t __arm_vrhaddq(uint16x8_t, uint16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32))) +uint32x4_t __arm_vrhaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32))) +uint32x4_t __arm_vrhaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8))) +uint8x16_t __arm_vrhaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8))) +uint8x16_t __arm_vrhaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16))) +int16x8_t __arm_vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16))) +int16x8_t __arm_vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32))) +int32x4_t __arm_vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32))) +int32x4_t __arm_vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8))) +int8x16_t __arm_vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8))) +int8x16_t __arm_vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16))) +uint16x8_t __arm_vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16))) +uint16x8_t __arm_vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32))) +uint32x4_t __arm_vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32))) +uint32x4_t __arm_vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8))) +uint8x16_t __arm_vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8))) +uint8x16_t __arm_vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32))) +int64_t __arm_vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32))) +int64_t __arm_vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32))) +uint64_t __arm_vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32))) +uint64_t __arm_vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32))) +int64_t __arm_vrmlaldavhaq_s32(int64_t, 
int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32))) +int64_t __arm_vrmlaldavhaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32))) +uint64_t __arm_vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32))) +uint64_t __arm_vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32))) +int64_t __arm_vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32))) +int64_t __arm_vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32))) +int64_t __arm_vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32))) +int64_t __arm_vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32))) +int64_t __arm_vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32))) +int64_t __arm_vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32))) +uint64_t __arm_vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32))) +uint64_t __arm_vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32))) +int64_t __arm_vrmlaldavhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32))) +int64_t __arm_vrmlaldavhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32))) +uint64_t __arm_vrmlaldavhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32))) +uint64_t __arm_vrmlaldavhq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32))) +int64_t __arm_vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32))) +int64_t __arm_vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32))) +int64_t __arm_vrmlaldavhxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32))) +int64_t __arm_vrmlaldavhxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32))) +int64_t __arm_vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32))) +int64_t __arm_vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32))) +int64_t __arm_vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32))) +int64_t __arm_vrmlsldavhaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32))) +int64_t __arm_vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32))) +int64_t __arm_vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32))) +int64_t __arm_vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32))) +int64_t __arm_vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32))) +int64_t __arm_vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32))) +int64_t __arm_vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32))) +int64_t __arm_vrmlsldavhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32))) +int64_t __arm_vrmlsldavhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32))) +int64_t __arm_vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32))) +int64_t __arm_vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32))) +int64_t __arm_vrmlsldavhxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32))) +int64_t __arm_vrmlsldavhxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16))) +int16x8_t __arm_vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16))) +int16x8_t __arm_vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32))) +int32x4_t __arm_vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32))) +int32x4_t __arm_vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8))) +int8x16_t __arm_vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8))) 
+int8x16_t __arm_vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16))) +uint16x8_t __arm_vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16))) +uint16x8_t __arm_vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32))) +uint32x4_t __arm_vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32))) +uint32x4_t __arm_vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8))) +uint8x16_t __arm_vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8))) +uint8x16_t __arm_vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16))) +int16x8_t __arm_vrmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16))) +int16x8_t __arm_vrmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32))) +int32x4_t __arm_vrmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32))) +int32x4_t __arm_vrmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8))) +int8x16_t __arm_vrmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8))) +int8x16_t __arm_vrmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16))) +uint16x8_t __arm_vrmulhq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16))) +uint16x8_t __arm_vrmulhq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32))) +uint32x4_t __arm_vrmulhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32))) +uint32x4_t __arm_vrmulhq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8))) +uint8x16_t __arm_vrmulhq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8))) +uint8x16_t __arm_vrmulhq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16))) +int16x8_t __arm_vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16))) +int16x8_t __arm_vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32))) +int32x4_t __arm_vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32))) +int32x4_t __arm_vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8))) +int8x16_t __arm_vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8))) +int8x16_t __arm_vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16))) +uint16x8_t __arm_vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16))) +uint16x8_t __arm_vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32))) +uint32x4_t __arm_vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32))) +uint32x4_t __arm_vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8))) +uint8x16_t __arm_vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8))) +uint8x16_t __arm_vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16))) +int16x8_t __arm_vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16))) +int16x8_t __arm_vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32))) +int32x4_t __arm_vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32))) +int32x4_t __arm_vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8))) +int8x16_t __arm_vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8))) +int8x16_t __arm_vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16))) +uint16x8_t __arm_vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16))) +uint16x8_t __arm_vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32))) +uint32x4_t __arm_vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32))) +uint32x4_t __arm_vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8))) +uint8x16_t __arm_vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8))) +uint8x16_t __arm_vrshlq_m_n(uint8x16_t, 
int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16))) +int16x8_t __arm_vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16))) +int16x8_t __arm_vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32))) +int32x4_t __arm_vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32))) +int32x4_t __arm_vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8))) +int8x16_t __arm_vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8))) +int8x16_t __arm_vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16))) +uint16x8_t __arm_vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16))) +uint16x8_t __arm_vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32))) +uint32x4_t __arm_vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32))) +uint32x4_t __arm_vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8))) +uint8x16_t __arm_vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8))) +uint8x16_t __arm_vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16))) +int16x8_t __arm_vrshlq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16))) +int16x8_t __arm_vrshlq(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32))) +int32x4_t __arm_vrshlq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32))) +int32x4_t __arm_vrshlq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8))) +int8x16_t __arm_vrshlq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8))) +int8x16_t __arm_vrshlq(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16))) +uint16x8_t __arm_vrshlq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16))) +uint16x8_t __arm_vrshlq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32))) +uint32x4_t __arm_vrshlq_n_u32(uint32x4_t, int32_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32))) +uint32x4_t __arm_vrshlq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8))) +uint8x16_t __arm_vrshlq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8))) +uint8x16_t __arm_vrshlq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16))) +int16x8_t __arm_vrshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16))) +int16x8_t __arm_vrshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32))) +int32x4_t __arm_vrshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32))) +int32x4_t __arm_vrshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8))) +int8x16_t __arm_vrshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8))) +int8x16_t __arm_vrshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16))) +uint16x8_t __arm_vrshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16))) +uint16x8_t __arm_vrshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32))) +uint32x4_t __arm_vrshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32))) +uint32x4_t __arm_vrshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8))) +uint8x16_t __arm_vrshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8))) +uint8x16_t __arm_vrshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16))) +int16x8_t __arm_vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16))) +int16x8_t __arm_vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32))) +int32x4_t __arm_vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32))) +int32x4_t __arm_vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8))) +int8x16_t __arm_vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8))) +int8x16_t __arm_vrshlq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16))) +uint16x8_t __arm_vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16))) +uint16x8_t 
__arm_vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32))) +uint32x4_t __arm_vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32))) +uint32x4_t __arm_vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8))) +uint8x16_t __arm_vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8))) +uint8x16_t __arm_vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16))) +int8x16_t __arm_vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16))) +int8x16_t __arm_vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32))) +int16x8_t __arm_vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32))) +int16x8_t __arm_vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16))) +uint8x16_t __arm_vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16))) +uint8x16_t __arm_vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32))) +uint16x8_t __arm_vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32))) +uint16x8_t __arm_vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16))) +int8x16_t __arm_vrshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16))) +int8x16_t __arm_vrshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32))) +int16x8_t __arm_vrshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32))) +int16x8_t __arm_vrshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16))) +uint8x16_t __arm_vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16))) +uint8x16_t __arm_vrshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32))) +uint16x8_t __arm_vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32))) +uint16x8_t __arm_vrshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16))) +int8x16_t __arm_vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16))) +int8x16_t __arm_vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32))) +int16x8_t __arm_vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32))) +int16x8_t __arm_vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16))) +uint8x16_t __arm_vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16))) +uint8x16_t __arm_vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32))) +uint16x8_t __arm_vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32))) +uint16x8_t __arm_vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16))) +int8x16_t __arm_vrshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16))) +int8x16_t __arm_vrshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32))) +int16x8_t __arm_vrshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32))) +int16x8_t __arm_vrshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16))) +uint8x16_t __arm_vrshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16))) +uint8x16_t __arm_vrshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32))) +uint16x8_t __arm_vrshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32))) +uint16x8_t __arm_vrshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16))) +int16x8_t __arm_vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16))) +int16x8_t __arm_vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32))) +int32x4_t __arm_vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32))) +int32x4_t __arm_vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8))) +int8x16_t 
__arm_vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8))) +int8x16_t __arm_vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16))) +uint16x8_t __arm_vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16))) +uint16x8_t __arm_vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32))) +uint32x4_t __arm_vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32))) +uint32x4_t __arm_vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8))) +uint8x16_t __arm_vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8))) +uint8x16_t __arm_vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16))) +int16x8_t __arm_vrshrq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16))) +int16x8_t __arm_vrshrq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32))) +int32x4_t __arm_vrshrq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32))) +int32x4_t __arm_vrshrq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8))) +int8x16_t __arm_vrshrq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8))) +int8x16_t __arm_vrshrq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16))) +uint16x8_t __arm_vrshrq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16))) +uint16x8_t __arm_vrshrq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32))) +uint32x4_t __arm_vrshrq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32))) +uint32x4_t __arm_vrshrq(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8))) +uint8x16_t __arm_vrshrq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8))) +uint8x16_t __arm_vrshrq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16))) +int16x8_t __arm_vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16))) +int16x8_t __arm_vrshrq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32))) +int32x4_t __arm_vrshrq_x_n_s32(int32x4_t, 
int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32))) +int32x4_t __arm_vrshrq_x(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8))) +int8x16_t __arm_vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8))) +int8x16_t __arm_vrshrq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16))) +uint16x8_t __arm_vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16))) +uint16x8_t __arm_vrshrq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32))) +uint32x4_t __arm_vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32))) +uint32x4_t __arm_vrshrq_x(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8))) +uint8x16_t __arm_vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8))) +uint8x16_t __arm_vrshrq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32))) +int32x4_t __arm_vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32))) +int32x4_t __arm_vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32))) +uint32x4_t __arm_vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32))) +uint32x4_t __arm_vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32))) +int32x4_t __arm_vsbciq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32))) +int32x4_t __arm_vsbciq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32))) +uint32x4_t __arm_vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32))) +uint32x4_t __arm_vsbciq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32))) +int32x4_t __arm_vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32))) +int32x4_t __arm_vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32))) +uint32x4_t __arm_vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32))) +uint32x4_t __arm_vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32))) +int32x4_t __arm_vsbcq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32))) +int32x4_t __arm_vsbcq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32))) +uint32x4_t __arm_vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32))) +uint32x4_t __arm_vsbcq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16))) +int16x8_t __arm_vsetq_lane_s16(int16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16))) +int16x8_t __arm_vsetq_lane(int16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32))) +int32x4_t __arm_vsetq_lane_s32(int32_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32))) +int32x4_t __arm_vsetq_lane(int32_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64))) +int64x2_t __arm_vsetq_lane_s64(int64_t, int64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64))) +int64x2_t __arm_vsetq_lane(int64_t, int64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8))) +int8x16_t __arm_vsetq_lane_s8(int8_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8))) +int8x16_t __arm_vsetq_lane(int8_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16))) +uint16x8_t __arm_vsetq_lane_u16(uint16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16))) +uint16x8_t __arm_vsetq_lane(uint16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32))) +uint32x4_t __arm_vsetq_lane_u32(uint32_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32))) +uint32x4_t __arm_vsetq_lane(uint32_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64))) +uint64x2_t __arm_vsetq_lane_u64(uint64_t, uint64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64))) +uint64x2_t __arm_vsetq_lane(uint64_t, uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8))) +uint8x16_t __arm_vsetq_lane_u8(uint8_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8))) +uint8x16_t __arm_vsetq_lane(uint8_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16))) +int16x8_t __arm_vshlcq_m_s16(int16x8_t, 
uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16))) +int16x8_t __arm_vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32))) +int32x4_t __arm_vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32))) +int32x4_t __arm_vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8))) +int8x16_t __arm_vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8))) +int8x16_t __arm_vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16))) +uint16x8_t __arm_vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16))) +uint16x8_t __arm_vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32))) +uint32x4_t __arm_vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32))) +uint32x4_t __arm_vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8))) +uint8x16_t __arm_vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8))) +uint8x16_t __arm_vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16))) +int16x8_t __arm_vshlcq_s16(int16x8_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16))) +int16x8_t __arm_vshlcq(int16x8_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32))) +int32x4_t __arm_vshlcq_s32(int32x4_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32))) +int32x4_t __arm_vshlcq(int32x4_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8))) +int8x16_t __arm_vshlcq_s8(int8x16_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8))) +int8x16_t __arm_vshlcq(int8x16_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16))) +uint16x8_t __arm_vshlcq_u16(uint16x8_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16))) +uint16x8_t __arm_vshlcq(uint16x8_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32))) +uint32x4_t __arm_vshlcq_u32(uint32x4_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32))) +uint32x4_t __arm_vshlcq(uint32x4_t, uint32_t *, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8))) +uint8x16_t __arm_vshlcq_u8(uint8x16_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8))) +uint8x16_t __arm_vshlcq(uint8x16_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16))) +int32x4_t __arm_vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16))) +int32x4_t __arm_vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8))) +int16x8_t __arm_vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8))) +int16x8_t __arm_vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16))) +uint32x4_t __arm_vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16))) +uint32x4_t __arm_vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8))) +uint16x8_t __arm_vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8))) +uint16x8_t __arm_vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16))) +int32x4_t __arm_vshllbq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16))) +int32x4_t __arm_vshllbq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8))) +int16x8_t __arm_vshllbq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8))) +int16x8_t __arm_vshllbq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16))) +uint32x4_t __arm_vshllbq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16))) +uint32x4_t __arm_vshllbq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8))) +uint16x8_t __arm_vshllbq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8))) +uint16x8_t __arm_vshllbq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16))) +int32x4_t __arm_vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16))) +int32x4_t __arm_vshllbq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8))) +int16x8_t __arm_vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8))) +int16x8_t 
__arm_vshllbq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16))) +uint32x4_t __arm_vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16))) +uint32x4_t __arm_vshllbq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8))) +uint16x8_t __arm_vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8))) +uint16x8_t __arm_vshllbq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16))) +int32x4_t __arm_vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16))) +int32x4_t __arm_vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8))) +int16x8_t __arm_vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8))) +int16x8_t __arm_vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16))) +uint32x4_t __arm_vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16))) +uint32x4_t __arm_vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8))) +uint16x8_t __arm_vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8))) +uint16x8_t __arm_vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16))) +int32x4_t __arm_vshlltq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16))) +int32x4_t __arm_vshlltq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8))) +int16x8_t __arm_vshlltq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8))) +int16x8_t __arm_vshlltq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16))) +uint32x4_t __arm_vshlltq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16))) +uint32x4_t __arm_vshlltq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8))) +uint16x8_t __arm_vshlltq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8))) +uint16x8_t __arm_vshlltq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16))) +int32x4_t __arm_vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16))) +int32x4_t __arm_vshlltq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8))) +int16x8_t __arm_vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8))) +int16x8_t __arm_vshlltq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16))) +uint32x4_t __arm_vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16))) +uint32x4_t __arm_vshlltq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8))) +uint16x8_t __arm_vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8))) +uint16x8_t __arm_vshlltq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16))) +int16x8_t __arm_vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16))) +int16x8_t __arm_vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32))) +int32x4_t __arm_vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32))) +int32x4_t __arm_vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8))) +int8x16_t __arm_vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8))) +int8x16_t __arm_vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16))) +uint16x8_t __arm_vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16))) +uint16x8_t __arm_vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32))) +uint32x4_t __arm_vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32))) +uint32x4_t __arm_vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8))) +uint8x16_t __arm_vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8))) +uint8x16_t __arm_vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16))) +int16x8_t __arm_vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16))) +int16x8_t 
__arm_vshlq_m_r(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32))) +int32x4_t __arm_vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32))) +int32x4_t __arm_vshlq_m_r(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8))) +int8x16_t __arm_vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8))) +int8x16_t __arm_vshlq_m_r(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16))) +uint16x8_t __arm_vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16))) +uint16x8_t __arm_vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32))) +uint32x4_t __arm_vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32))) +uint32x4_t __arm_vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8))) +uint8x16_t __arm_vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8))) +uint8x16_t __arm_vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16))) +int16x8_t __arm_vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16))) +int16x8_t __arm_vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32))) +int32x4_t __arm_vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32))) +int32x4_t __arm_vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8))) +int8x16_t __arm_vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8))) +int8x16_t __arm_vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16))) +uint16x8_t __arm_vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16))) +uint16x8_t __arm_vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32))) +uint32x4_t __arm_vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32))) +uint32x4_t __arm_vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8))) +uint8x16_t __arm_vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8))) +uint8x16_t __arm_vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16))) +int16x8_t __arm_vshlq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16))) +int16x8_t __arm_vshlq_n(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32))) +int32x4_t __arm_vshlq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32))) +int32x4_t __arm_vshlq_n(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8))) +int8x16_t __arm_vshlq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8))) +int8x16_t __arm_vshlq_n(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16))) +uint16x8_t __arm_vshlq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16))) +uint16x8_t __arm_vshlq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32))) +uint32x4_t __arm_vshlq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32))) +uint32x4_t __arm_vshlq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8))) +uint8x16_t __arm_vshlq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8))) +uint8x16_t __arm_vshlq_n(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16))) +int16x8_t __arm_vshlq_r_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16))) +int16x8_t __arm_vshlq_r(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32))) +int32x4_t __arm_vshlq_r_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32))) +int32x4_t __arm_vshlq_r(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8))) +int8x16_t __arm_vshlq_r_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8))) +int8x16_t __arm_vshlq_r(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16))) +uint16x8_t __arm_vshlq_r_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16))) +uint16x8_t __arm_vshlq_r(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32))) +uint32x4_t __arm_vshlq_r_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32))) 
+uint32x4_t __arm_vshlq_r(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8))) +uint8x16_t __arm_vshlq_r_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8))) +uint8x16_t __arm_vshlq_r(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16))) +int16x8_t __arm_vshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16))) +int16x8_t __arm_vshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32))) +int32x4_t __arm_vshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32))) +int32x4_t __arm_vshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8))) +int8x16_t __arm_vshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8))) +int8x16_t __arm_vshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16))) +uint16x8_t __arm_vshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16))) +uint16x8_t __arm_vshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32))) +uint32x4_t __arm_vshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32))) +uint32x4_t __arm_vshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8))) +uint8x16_t __arm_vshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8))) +uint8x16_t __arm_vshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16))) +int16x8_t __arm_vshlq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16))) +int16x8_t __arm_vshlq_x_n(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32))) +int32x4_t __arm_vshlq_x_n_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32))) +int32x4_t __arm_vshlq_x_n(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8))) +int8x16_t __arm_vshlq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8))) +int8x16_t __arm_vshlq_x_n(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16))) +uint16x8_t __arm_vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16))) +uint16x8_t __arm_vshlq_x_n(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32))) +uint32x4_t 
__arm_vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32))) +uint32x4_t __arm_vshlq_x_n(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8))) +uint8x16_t __arm_vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8))) +uint8x16_t __arm_vshlq_x_n(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16))) +int16x8_t __arm_vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16))) +int16x8_t __arm_vshlq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32))) +int32x4_t __arm_vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32))) +int32x4_t __arm_vshlq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8))) +int8x16_t __arm_vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8))) +int8x16_t __arm_vshlq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16))) +uint16x8_t __arm_vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16))) +uint16x8_t __arm_vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32))) +uint32x4_t __arm_vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32))) +uint32x4_t __arm_vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8))) +uint8x16_t __arm_vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8))) +uint8x16_t __arm_vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16))) +int8x16_t __arm_vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16))) +int8x16_t __arm_vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32))) +int16x8_t __arm_vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32))) +int16x8_t __arm_vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16))) +uint8x16_t __arm_vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16))) 
+uint8x16_t __arm_vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32))) +uint16x8_t __arm_vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32))) +uint16x8_t __arm_vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16))) +int8x16_t __arm_vshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16))) +int8x16_t __arm_vshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32))) +int16x8_t __arm_vshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32))) +int16x8_t __arm_vshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16))) +uint8x16_t __arm_vshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16))) +uint8x16_t __arm_vshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32))) +uint16x8_t __arm_vshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32))) +uint16x8_t __arm_vshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16))) +int8x16_t __arm_vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16))) +int8x16_t __arm_vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32))) +int16x8_t __arm_vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32))) +int16x8_t __arm_vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16))) +uint8x16_t __arm_vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16))) +uint8x16_t __arm_vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32))) +uint16x8_t __arm_vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32))) +uint16x8_t __arm_vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16))) +int8x16_t __arm_vshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16))) +int8x16_t __arm_vshrntq(int8x16_t, int16x8_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32))) +int16x8_t __arm_vshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32))) +int16x8_t __arm_vshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16))) +uint8x16_t __arm_vshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16))) +uint8x16_t __arm_vshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32))) +uint16x8_t __arm_vshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32))) +uint16x8_t __arm_vshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16))) +int16x8_t __arm_vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16))) +int16x8_t __arm_vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32))) +int32x4_t __arm_vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32))) +int32x4_t __arm_vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8))) +int8x16_t __arm_vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8))) +int8x16_t __arm_vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16))) +uint16x8_t __arm_vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16))) +uint16x8_t __arm_vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32))) +uint32x4_t __arm_vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32))) +uint32x4_t __arm_vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8))) +uint8x16_t __arm_vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8))) +uint8x16_t __arm_vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16))) +int16x8_t __arm_vshrq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16))) +int16x8_t __arm_vshrq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32))) +int32x4_t __arm_vshrq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32))) +int32x4_t __arm_vshrq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8))) +int8x16_t __arm_vshrq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8))) +int8x16_t __arm_vshrq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16))) +uint16x8_t __arm_vshrq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16))) +uint16x8_t __arm_vshrq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32))) +uint32x4_t __arm_vshrq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32))) +uint32x4_t __arm_vshrq(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8))) +uint8x16_t __arm_vshrq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8))) +uint8x16_t __arm_vshrq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16))) +int16x8_t __arm_vshrq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16))) +int16x8_t __arm_vshrq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32))) +int32x4_t __arm_vshrq_x_n_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32))) +int32x4_t __arm_vshrq_x(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8))) +int8x16_t __arm_vshrq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8))) +int8x16_t __arm_vshrq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16))) +uint16x8_t __arm_vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16))) +uint16x8_t __arm_vshrq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32))) +uint32x4_t __arm_vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32))) +uint32x4_t __arm_vshrq_x(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8))) +uint8x16_t __arm_vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8))) +uint8x16_t __arm_vshrq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16))) +int16x8_t __arm_vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16))) +int16x8_t __arm_vsliq_m(int16x8_t, int16x8_t, 
int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32))) +int32x4_t __arm_vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32))) +int32x4_t __arm_vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8))) +int8x16_t __arm_vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8))) +int8x16_t __arm_vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16))) +uint16x8_t __arm_vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16))) +uint16x8_t __arm_vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32))) +uint32x4_t __arm_vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32))) +uint32x4_t __arm_vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8))) +uint8x16_t __arm_vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8))) +uint8x16_t __arm_vsliq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16))) +int16x8_t __arm_vsliq_n_s16(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16))) +int16x8_t __arm_vsliq(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32))) +int32x4_t __arm_vsliq_n_s32(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32))) +int32x4_t __arm_vsliq(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8))) +int8x16_t __arm_vsliq_n_s8(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8))) +int8x16_t __arm_vsliq(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16))) +uint16x8_t __arm_vsliq_n_u16(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16))) +uint16x8_t __arm_vsliq(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32))) +uint32x4_t __arm_vsliq_n_u32(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32))) +uint32x4_t __arm_vsliq(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8))) +uint8x16_t __arm_vsliq_n_u8(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8))) +uint8x16_t __arm_vsliq(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16))) +int16x8_t __arm_vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16))) +int16x8_t __arm_vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32))) +int32x4_t __arm_vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32))) +int32x4_t __arm_vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8))) +int8x16_t __arm_vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8))) +int8x16_t __arm_vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16))) +uint16x8_t __arm_vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16))) +uint16x8_t __arm_vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32))) +uint32x4_t __arm_vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32))) +uint32x4_t __arm_vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8))) +uint8x16_t __arm_vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8))) +uint8x16_t __arm_vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16))) +int16x8_t __arm_vsriq_n_s16(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16))) +int16x8_t __arm_vsriq(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32))) +int32x4_t __arm_vsriq_n_s32(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32))) +int32x4_t __arm_vsriq(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8))) +int8x16_t __arm_vsriq_n_s8(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8))) +int8x16_t __arm_vsriq(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16))) +uint16x8_t __arm_vsriq_n_u16(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16))) +uint16x8_t __arm_vsriq(uint16x8_t, uint16x8_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32))) +uint32x4_t __arm_vsriq_n_u32(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32))) +uint32x4_t __arm_vsriq(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8))) +uint8x16_t __arm_vsriq_n_u8(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8))) +uint8x16_t __arm_vsriq(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16))) +void __arm_vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16))) +void __arm_vst1q_p(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32))) +void __arm_vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32))) +void __arm_vst1q_p(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8))) +void __arm_vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8))) +void __arm_vst1q_p(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16))) +void __arm_vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16))) +void __arm_vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32))) +void __arm_vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32))) +void __arm_vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8))) +void __arm_vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8))) +void __arm_vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16))) +void __arm_vst1q_s16(int16_t *, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16))) +void __arm_vst1q(int16_t *, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32))) +void __arm_vst1q_s32(int32_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32))) +void __arm_vst1q(int32_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8))) +void __arm_vst1q_s8(int8_t *, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8))) +void __arm_vst1q(int8_t *, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16))) +void __arm_vst1q_u16(uint16_t *, uint16x8_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16))) +void __arm_vst1q(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32))) +void __arm_vst1q_u32(uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32))) +void __arm_vst1q(uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8))) +void __arm_vst1q_u8(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8))) +void __arm_vst1q(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16))) +void __arm_vst2q_s16(int16_t *, int16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16))) +void __arm_vst2q(int16_t *, int16x8x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32))) +void __arm_vst2q_s32(int32_t *, int32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32))) +void __arm_vst2q(int32_t *, int32x4x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8))) +void __arm_vst2q_s8(int8_t *, int8x16x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8))) +void __arm_vst2q(int8_t *, int8x16x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16))) +void __arm_vst2q_u16(uint16_t *, uint16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16))) +void __arm_vst2q(uint16_t *, uint16x8x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32))) +void __arm_vst2q_u32(uint32_t *, uint32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32))) +void __arm_vst2q(uint32_t *, uint32x4x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8))) +void __arm_vst2q_u8(uint8_t *, uint8x16x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8))) +void __arm_vst2q(uint8_t *, uint8x16x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16))) +void __arm_vst4q_s16(int16_t *, int16x8x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16))) +void __arm_vst4q(int16_t *, int16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32))) +void __arm_vst4q_s32(int32_t *, int32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32))) +void __arm_vst4q(int32_t *, int32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8))) +void __arm_vst4q_s8(int8_t *, int8x16x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8))) +void __arm_vst4q(int8_t *, int8x16x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16))) +void __arm_vst4q_u16(uint16_t *, uint16x8x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16))) +void __arm_vst4q(uint16_t *, uint16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32))) +void __arm_vst4q_u32(uint32_t *, uint32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32))) +void __arm_vst4q(uint32_t *, uint32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8))) +void __arm_vst4q_u8(uint8_t *, uint8x16x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8))) +void __arm_vst4q(uint8_t *, uint8x16x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16))) +void __arm_vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16))) +void __arm_vstrbq_p(int8_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32))) +void __arm_vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32))) +void __arm_vstrbq_p(int8_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8))) +void __arm_vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8))) +void __arm_vstrbq_p(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16))) +void __arm_vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16))) +void __arm_vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32))) +void __arm_vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32))) +void __arm_vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8))) +void __arm_vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8))) +void __arm_vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16))) +void __arm_vstrbq_s16(int8_t *, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16))) +void __arm_vstrbq(int8_t *, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32))) +void __arm_vstrbq_s32(int8_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32))) +void __arm_vstrbq(int8_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8))) +void __arm_vstrbq_s8(int8_t *, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8))) +void __arm_vstrbq(int8_t *, int8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16))) +void __arm_vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16))) +void __arm_vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32))) +void __arm_vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32))) +void __arm_vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8))) +void __arm_vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8))) +void __arm_vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16))) +void __arm_vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16))) +void __arm_vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32))) +void __arm_vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32))) +void __arm_vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8))) +void __arm_vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8))) +void __arm_vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16))) +void __arm_vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16))) +void __arm_vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32))) +void __arm_vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32))) +void __arm_vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8))) +void __arm_vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8))) +void __arm_vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16))) +void __arm_vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16))) +void __arm_vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32))) +void __arm_vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32))) +void __arm_vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8))) +void __arm_vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8))) +void __arm_vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16))) +void __arm_vstrbq_u16(uint8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16))) +void __arm_vstrbq(uint8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32))) +void __arm_vstrbq_u32(uint8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32))) +void __arm_vstrbq(uint8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8))) +void __arm_vstrbq_u8(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8))) +void __arm_vstrbq(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64))) +void __arm_vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64))) +void __arm_vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64))) +void __arm_vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64))) +void __arm_vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64))) +void __arm_vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64))) +void __arm_vstrdq_scatter_base(uint64x2_t, int, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64))) +void __arm_vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64))) +void __arm_vstrdq_scatter_base(uint64x2_t, int, uint64x2_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64))) +void __arm_vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64))) +void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64))) +void __arm_vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64))) +void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64))) +void __arm_vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64))) +void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64))) +void __arm_vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64))) +void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64))) +void __arm_vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64))) +void __arm_vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64))) +void __arm_vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64))) +void __arm_vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64))) +void __arm_vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64))) +void __arm_vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64))) +void __arm_vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64))) +void __arm_vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64))) +void __arm_vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64))) +void __arm_vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64))) +void __arm_vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64))) +void __arm_vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64))) +void __arm_vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64))) +void __arm_vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64))) +void __arm_vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64))) +void __arm_vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16))) +void __arm_vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16))) +void __arm_vstrhq_p(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32))) +void __arm_vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32))) +void __arm_vstrhq_p(int16_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16))) +void __arm_vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16))) +void __arm_vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32))) +void __arm_vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32))) +void __arm_vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16))) +void __arm_vstrhq_s16(int16_t *, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16))) +void __arm_vstrhq(int16_t *, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32))) +void __arm_vstrhq_s32(int16_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32))) +void __arm_vstrhq(int16_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16))) +void __arm_vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16))) +void __arm_vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32))) +void __arm_vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32))) +void __arm_vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16))) +void __arm_vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16))) +void __arm_vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32))) +void __arm_vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32))) +void __arm_vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16))) +void __arm_vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16))) +void __arm_vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32))) +void __arm_vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32))) +void __arm_vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16))) +void __arm_vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16))) +void __arm_vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32))) +void __arm_vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32))) +void __arm_vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16))) +void __arm_vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16))) +void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32))) +void __arm_vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32))) +void 
__arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16))) +void __arm_vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16))) +void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32))) +void __arm_vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32))) +void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16))) +void __arm_vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16))) +void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32))) +void __arm_vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32))) +void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16))) +void __arm_vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16))) +void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32))) +void __arm_vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32))) +void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16))) +void __arm_vstrhq_u16(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16))) +void __arm_vstrhq(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32))) +void __arm_vstrhq_u32(uint16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32))) +void __arm_vstrhq(uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32))) +void __arm_vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32))) +void __arm_vstrwq_p(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32))) +void __arm_vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32))) +void __arm_vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32))) +void __arm_vstrwq_s32(int32_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32))) +void __arm_vstrwq(int32_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32))) +void __arm_vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32))) +void __arm_vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32))) +void __arm_vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32))) +void __arm_vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32))) +void __arm_vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32))) +void __arm_vstrwq_scatter_base(uint32x4_t, int, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32))) +void __arm_vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32))) +void __arm_vstrwq_scatter_base(uint32x4_t, int, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32))) +void __arm_vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32))) +void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32))) +void __arm_vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32))) +void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32))) +void __arm_vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32))) +void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32))) +void __arm_vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32))) +void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32))) +void __arm_vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32))) +void __arm_vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32))) +void __arm_vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32))) +void __arm_vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32))) +void __arm_vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32))) +void __arm_vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32))) +void __arm_vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32))) +void __arm_vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32))) +void __arm_vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32))) +void __arm_vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32))) +void __arm_vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32))) +void __arm_vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32))) +void __arm_vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32))) +void __arm_vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32))) +void __arm_vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32))) +void __arm_vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32))) +void __arm_vstrwq_u32(uint32_t *, 
uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32))) +void __arm_vstrwq(uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16))) +int16x8_t __arm_vsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16))) +int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32))) +int32x4_t __arm_vsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32))) +int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8))) +int8x16_t __arm_vsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8))) +int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16))) +uint16x8_t __arm_vsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16))) +uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32))) +uint32x4_t __arm_vsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32))) +uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8))) +uint8x16_t __arm_vsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8))) +uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16))) +int16x8_t __arm_vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16))) +int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32))) +int32x4_t __arm_vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32))) +int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8))) +int8x16_t __arm_vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8))) +int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16))) +uint16x8_t __arm_vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16))) +uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32))) +uint32x4_t __arm_vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32))) +uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8))) +uint8x16_t __arm_vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8))) +uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16))) +int16x8_t __arm_vsubq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16))) +int16x8_t __arm_vsubq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32))) +int32x4_t __arm_vsubq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32))) +int32x4_t __arm_vsubq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8))) +int8x16_t __arm_vsubq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8))) +int8x16_t __arm_vsubq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16))) +uint16x8_t __arm_vsubq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16))) +uint16x8_t __arm_vsubq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32))) +uint32x4_t __arm_vsubq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32))) +uint32x4_t __arm_vsubq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8))) +uint8x16_t __arm_vsubq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8))) +uint8x16_t __arm_vsubq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16))) +int16x8_t __arm_vsubq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16))) +int16x8_t __arm_vsubq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32))) +int32x4_t __arm_vsubq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32))) +int32x4_t __arm_vsubq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8))) +int8x16_t __arm_vsubq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8))) +int8x16_t 
__arm_vsubq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16))) +uint16x8_t __arm_vsubq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16))) +uint16x8_t __arm_vsubq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32))) +uint32x4_t __arm_vsubq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32))) +uint32x4_t __arm_vsubq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8))) +uint8x16_t __arm_vsubq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8))) +uint8x16_t __arm_vsubq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16))) +int16x8_t __arm_vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16))) +int16x8_t __arm_vsubq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32))) +int32x4_t __arm_vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32))) +int32x4_t __arm_vsubq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8))) +int8x16_t __arm_vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8))) +int8x16_t __arm_vsubq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16))) +uint16x8_t __arm_vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16))) +uint16x8_t __arm_vsubq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32))) +uint32x4_t __arm_vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32))) +uint32x4_t __arm_vsubq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8))) +uint8x16_t __arm_vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8))) +uint8x16_t __arm_vsubq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16))) +int16x8_t __arm_vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16))) +int16x8_t __arm_vsubq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32))) +int32x4_t __arm_vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32))) 
+int32x4_t __arm_vsubq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8))) +int8x16_t __arm_vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8))) +int8x16_t __arm_vsubq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16))) +uint16x8_t __arm_vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16))) +uint16x8_t __arm_vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32))) +uint32x4_t __arm_vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32))) +uint32x4_t __arm_vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8))) +uint8x16_t __arm_vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8))) +uint8x16_t __arm_vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16))) +int16x8_t __arm_vuninitializedq(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32))) +int32x4_t __arm_vuninitializedq(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64))) +int64x2_t __arm_vuninitializedq(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8))) +int8x16_t __arm_vuninitializedq(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16))) +uint16x8_t __arm_vuninitializedq(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32))) +uint32x4_t __arm_vuninitializedq(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64))) +uint64x2_t __arm_vuninitializedq(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8))) +uint8x16_t __arm_vuninitializedq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16))) +int16x8_t __arm_vuninitializedq_s16(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32))) +int32x4_t __arm_vuninitializedq_s32(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64))) +int64x2_t __arm_vuninitializedq_s64(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8))) +int8x16_t __arm_vuninitializedq_s8(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16))) +uint16x8_t __arm_vuninitializedq_u16(); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32))) +uint32x4_t __arm_vuninitializedq_u32(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64))) +uint64x2_t __arm_vuninitializedq_u64(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8))) +uint8x16_t __arm_vuninitializedq_u8(); + +#if (__ARM_FEATURE_MVE & 2) + +typedef __fp16 float16_t; +typedef float float32_t; +typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t; +typedef struct { float16x8_t val[2]; } float16x8x2_t; +typedef struct { float16x8_t val[4]; } float16x8x4_t; +typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t; +typedef struct { float32x4_t val[2]; } float32x4x2_t; +typedef struct { float32x4_t val[4]; } float32x4x4_t; + +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16))) +float16x8_t __arm_vabdq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16))) +float16x8_t __arm_vabdq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32))) +float32x4_t __arm_vabdq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32))) +float32x4_t __arm_vabdq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16))) +float16x8_t __arm_vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16))) +float16x8_t __arm_vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32))) +float32x4_t __arm_vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32))) +float32x4_t __arm_vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16))) +float16x8_t __arm_vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16))) +float16x8_t __arm_vabdq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32))) +float32x4_t __arm_vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32))) +float32x4_t __arm_vabdq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16))) +float16x8_t __arm_vabsq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16))) +float16x8_t __arm_vabsq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32))) +float32x4_t __arm_vabsq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32))) +float32x4_t __arm_vabsq(float32x4_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16))) +float16x8_t __arm_vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16))) +float16x8_t __arm_vabsq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32))) +float32x4_t __arm_vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32))) +float32x4_t __arm_vabsq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16))) +float16x8_t __arm_vabsq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16))) +float16x8_t __arm_vabsq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32))) +float32x4_t __arm_vabsq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32))) +float32x4_t __arm_vabsq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16))) +float16x8_t __arm_vaddq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16))) +float16x8_t __arm_vaddq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32))) +float32x4_t __arm_vaddq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32))) +float32x4_t __arm_vaddq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16))) +float16x8_t __arm_vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16))) +float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32))) +float32x4_t __arm_vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32))) +float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16))) +float16x8_t __arm_vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16))) +float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32))) +float32x4_t __arm_vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32))) +float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16))) +float16x8_t __arm_vaddq_n_f16(float16x8_t, 
float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16))) +float16x8_t __arm_vaddq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32))) +float32x4_t __arm_vaddq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32))) +float32x4_t __arm_vaddq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16))) +float16x8_t __arm_vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16))) +float16x8_t __arm_vaddq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32))) +float32x4_t __arm_vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32))) +float32x4_t __arm_vaddq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16))) +float16x8_t __arm_vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16))) +float16x8_t __arm_vaddq_x(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32))) +float32x4_t __arm_vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32))) +float32x4_t __arm_vaddq_x(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16))) +float16x8_t __arm_vandq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16))) +float16x8_t __arm_vandq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32))) +float32x4_t __arm_vandq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32))) +float32x4_t __arm_vandq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16))) +float16x8_t __arm_vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16))) +float16x8_t __arm_vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32))) +float32x4_t __arm_vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32))) +float32x4_t __arm_vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16))) +float16x8_t __arm_vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16))) +float16x8_t __arm_vandq_x(float16x8_t, float16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32))) +float32x4_t __arm_vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32))) +float32x4_t __arm_vandq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16))) +float16x8_t __arm_vbicq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16))) +float16x8_t __arm_vbicq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32))) +float32x4_t __arm_vbicq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32))) +float32x4_t __arm_vbicq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16))) +float16x8_t __arm_vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16))) +float16x8_t __arm_vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32))) +float32x4_t __arm_vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32))) +float32x4_t __arm_vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16))) +float16x8_t __arm_vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16))) +float16x8_t __arm_vbicq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32))) +float32x4_t __arm_vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32))) +float32x4_t __arm_vbicq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16))) +float16x8_t __arm_vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16))) +float16x8_t __arm_vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32))) +float32x4_t __arm_vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32))) +float32x4_t __arm_vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16))) +float16x8_t __arm_vbrsrq_n_f16(float16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16))) +float16x8_t __arm_vbrsrq(float16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32))) 
+float32x4_t __arm_vbrsrq_n_f32(float32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32))) +float32x4_t __arm_vbrsrq(float32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16))) +float16x8_t __arm_vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16))) +float16x8_t __arm_vbrsrq_x(float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32))) +float32x4_t __arm_vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32))) +float32x4_t __arm_vbrsrq_x(float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16))) +float16x8_t __arm_vcaddq_rot270_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16))) +float16x8_t __arm_vcaddq_rot270(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32))) +float32x4_t __arm_vcaddq_rot270_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32))) +float32x4_t __arm_vcaddq_rot270(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16))) +float16x8_t __arm_vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16))) +float16x8_t __arm_vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32))) +float32x4_t __arm_vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32))) +float32x4_t __arm_vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16))) +float16x8_t __arm_vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16))) +float16x8_t __arm_vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32))) +float32x4_t __arm_vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32))) +float32x4_t __arm_vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16))) +float16x8_t __arm_vcaddq_rot90_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16))) +float16x8_t __arm_vcaddq_rot90(float16x8_t, float16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32))) +float32x4_t __arm_vcaddq_rot90_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32))) +float32x4_t __arm_vcaddq_rot90(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16))) +float16x8_t __arm_vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16))) +float16x8_t __arm_vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32))) +float32x4_t __arm_vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32))) +float32x4_t __arm_vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16))) +float16x8_t __arm_vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16))) +float16x8_t __arm_vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32))) +float32x4_t __arm_vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32))) +float32x4_t __arm_vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16))) +float16x8_t __arm_vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16))) +float16x8_t __arm_vcmlaq(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32))) +float32x4_t __arm_vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32))) +float32x4_t __arm_vcmlaq(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16))) +float16x8_t __arm_vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16))) +float16x8_t __arm_vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32))) +float32x4_t __arm_vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32))) +float32x4_t __arm_vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16))) +float16x8_t __arm_vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16))) +float16x8_t __arm_vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32))) +float32x4_t __arm_vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32))) +float32x4_t __arm_vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16))) +float16x8_t __arm_vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16))) +float16x8_t __arm_vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32))) +float32x4_t __arm_vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32))) +float32x4_t __arm_vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16))) +float16x8_t __arm_vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16))) +float16x8_t __arm_vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32))) +float32x4_t __arm_vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32))) +float32x4_t __arm_vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16))) +float16x8_t __arm_vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16))) +float16x8_t __arm_vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32))) +float32x4_t __arm_vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32))) +float32x4_t __arm_vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16))) +float16x8_t __arm_vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16))) +float16x8_t __arm_vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32))) +float32x4_t __arm_vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32))) +float32x4_t __arm_vcmlaq_rot90(float32x4_t, float32x4_t, 
float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16))) +float16x8_t __arm_vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16))) +float16x8_t __arm_vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32))) +float32x4_t __arm_vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32))) +float32x4_t __arm_vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16))) +mve_pred16_t __arm_vcmpeqq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16))) +mve_pred16_t __arm_vcmpeqq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32))) +mve_pred16_t __arm_vcmpeqq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32))) +mve_pred16_t __arm_vcmpeqq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16))) +mve_pred16_t __arm_vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16))) +mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32))) +mve_pred16_t __arm_vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32))) +mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16))) +mve_pred16_t __arm_vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16))) +mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32))) +mve_pred16_t __arm_vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32))) +mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16))) +mve_pred16_t __arm_vcmpeqq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16))) +mve_pred16_t __arm_vcmpeqq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32))) +mve_pred16_t __arm_vcmpeqq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32))) +mve_pred16_t __arm_vcmpeqq(float32x4_t, float32_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16))) +mve_pred16_t __arm_vcmpgeq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16))) +mve_pred16_t __arm_vcmpgeq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32))) +mve_pred16_t __arm_vcmpgeq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32))) +mve_pred16_t __arm_vcmpgeq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16))) +mve_pred16_t __arm_vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16))) +mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32))) +mve_pred16_t __arm_vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32))) +mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16))) +mve_pred16_t __arm_vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16))) +mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32))) +mve_pred16_t __arm_vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32))) +mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16))) +mve_pred16_t __arm_vcmpgeq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16))) +mve_pred16_t __arm_vcmpgeq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32))) +mve_pred16_t __arm_vcmpgeq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32))) +mve_pred16_t __arm_vcmpgeq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16))) +mve_pred16_t __arm_vcmpgtq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16))) +mve_pred16_t __arm_vcmpgtq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32))) +mve_pred16_t __arm_vcmpgtq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32))) +mve_pred16_t __arm_vcmpgtq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16))) +mve_pred16_t __arm_vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16))) +mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32))) +mve_pred16_t __arm_vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32))) +mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16))) +mve_pred16_t __arm_vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16))) +mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32))) +mve_pred16_t __arm_vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32))) +mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16))) +mve_pred16_t __arm_vcmpgtq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16))) +mve_pred16_t __arm_vcmpgtq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32))) +mve_pred16_t __arm_vcmpgtq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32))) +mve_pred16_t __arm_vcmpgtq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16))) +mve_pred16_t __arm_vcmpleq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16))) +mve_pred16_t __arm_vcmpleq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32))) +mve_pred16_t __arm_vcmpleq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32))) +mve_pred16_t __arm_vcmpleq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16))) +mve_pred16_t __arm_vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16))) +mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32))) +mve_pred16_t __arm_vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32))) +mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16))) +mve_pred16_t __arm_vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16))) +mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32))) +mve_pred16_t __arm_vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32))) +mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16))) +mve_pred16_t __arm_vcmpleq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16))) +mve_pred16_t __arm_vcmpleq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32))) +mve_pred16_t __arm_vcmpleq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32))) +mve_pred16_t __arm_vcmpleq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16))) +mve_pred16_t __arm_vcmpltq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16))) +mve_pred16_t __arm_vcmpltq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32))) +mve_pred16_t __arm_vcmpltq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32))) +mve_pred16_t __arm_vcmpltq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16))) +mve_pred16_t __arm_vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16))) +mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32))) +mve_pred16_t __arm_vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32))) +mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16))) +mve_pred16_t __arm_vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16))) +mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32))) +mve_pred16_t __arm_vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32))) +mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16))) +mve_pred16_t __arm_vcmpltq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16))) +mve_pred16_t __arm_vcmpltq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32))) +mve_pred16_t __arm_vcmpltq_n_f32(float32x4_t, float32_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32))) +mve_pred16_t __arm_vcmpltq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16))) +mve_pred16_t __arm_vcmpneq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16))) +mve_pred16_t __arm_vcmpneq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32))) +mve_pred16_t __arm_vcmpneq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32))) +mve_pred16_t __arm_vcmpneq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16))) +mve_pred16_t __arm_vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16))) +mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32))) +mve_pred16_t __arm_vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32))) +mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16))) +mve_pred16_t __arm_vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16))) +mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32))) +mve_pred16_t __arm_vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32))) +mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16))) +mve_pred16_t __arm_vcmpneq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16))) +mve_pred16_t __arm_vcmpneq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32))) +mve_pred16_t __arm_vcmpneq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32))) +mve_pred16_t __arm_vcmpneq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16))) +float16x8_t __arm_vcmulq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16))) +float16x8_t __arm_vcmulq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32))) +float32x4_t __arm_vcmulq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32))) +float32x4_t __arm_vcmulq(float32x4_t, float32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16))) +float16x8_t __arm_vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16))) +float16x8_t __arm_vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32))) +float32x4_t __arm_vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32))) +float32x4_t __arm_vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16))) +float16x8_t __arm_vcmulq_rot180_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16))) +float16x8_t __arm_vcmulq_rot180(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32))) +float32x4_t __arm_vcmulq_rot180_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32))) +float32x4_t __arm_vcmulq_rot180(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16))) +float16x8_t __arm_vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16))) +float16x8_t __arm_vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32))) +float32x4_t __arm_vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32))) +float32x4_t __arm_vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16))) +float16x8_t __arm_vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16))) +float16x8_t __arm_vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32))) +float32x4_t __arm_vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32))) +float32x4_t __arm_vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16))) +float16x8_t __arm_vcmulq_rot270_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16))) +float16x8_t __arm_vcmulq_rot270(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32))) +float32x4_t __arm_vcmulq_rot270_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32))) +float32x4_t __arm_vcmulq_rot270(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16))) +float16x8_t __arm_vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16))) +float16x8_t __arm_vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32))) +float32x4_t __arm_vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32))) +float32x4_t __arm_vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16))) +float16x8_t __arm_vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16))) +float16x8_t __arm_vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32))) +float32x4_t __arm_vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32))) +float32x4_t __arm_vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16))) +float16x8_t __arm_vcmulq_rot90_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16))) +float16x8_t __arm_vcmulq_rot90(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32))) +float32x4_t __arm_vcmulq_rot90_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32))) +float32x4_t __arm_vcmulq_rot90(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16))) +float16x8_t __arm_vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16))) +float16x8_t __arm_vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32))) +float32x4_t __arm_vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32))) +float32x4_t __arm_vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16))) +float16x8_t __arm_vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16))) +float16x8_t __arm_vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32))) +float32x4_t __arm_vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32))) +float32x4_t __arm_vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16))) +float16x8_t __arm_vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16))) +float16x8_t __arm_vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32))) +float32x4_t __arm_vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32))) +float32x4_t __arm_vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16))) +float16x8_t __arm_vcreateq_f16(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32))) +float32x4_t __arm_vcreateq_f32(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16))) +int16x8_t __arm_vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16))) +int16x8_t __arm_vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32))) +int32x4_t __arm_vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32))) +int32x4_t __arm_vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16))) +uint16x8_t __arm_vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16))) +uint16x8_t __arm_vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32))) +uint32x4_t __arm_vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32))) +uint32x4_t __arm_vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16))) +int16x8_t __arm_vcvtaq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32))) +int32x4_t __arm_vcvtaq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16))) +uint16x8_t __arm_vcvtaq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32))) +uint32x4_t __arm_vcvtaq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16))) +int16x8_t __arm_vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32))) +int32x4_t __arm_vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16))) +uint16x8_t __arm_vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32))) +uint32x4_t __arm_vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32))) +float16x8_t __arm_vcvtbq_f16_f32(float16x8_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16))) +float32x4_t __arm_vcvtbq_f32_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32))) +float16x8_t __arm_vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16))) +float32x4_t __arm_vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16))) +float32x4_t __arm_vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16))) +int16x8_t __arm_vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16))) +int16x8_t __arm_vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32))) +int32x4_t __arm_vcvtmq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32))) +int32x4_t __arm_vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16))) +uint16x8_t __arm_vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16))) +uint16x8_t __arm_vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32))) +uint32x4_t __arm_vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32))) +uint32x4_t __arm_vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16))) +int16x8_t __arm_vcvtmq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32))) +int32x4_t __arm_vcvtmq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16))) +uint16x8_t __arm_vcvtmq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32))) +uint32x4_t __arm_vcvtmq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16))) +int16x8_t __arm_vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32))) +int32x4_t 
__arm_vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16))) +uint16x8_t __arm_vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32))) +uint32x4_t __arm_vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16))) +int16x8_t __arm_vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16))) +int16x8_t __arm_vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32))) +int32x4_t __arm_vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32))) +int32x4_t __arm_vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16))) +uint16x8_t __arm_vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16))) +uint16x8_t __arm_vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32))) +uint32x4_t __arm_vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32))) +uint32x4_t __arm_vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16))) +int16x8_t __arm_vcvtnq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32))) +int32x4_t __arm_vcvtnq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16))) +uint16x8_t __arm_vcvtnq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32))) +uint32x4_t __arm_vcvtnq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16))) +int16x8_t __arm_vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32))) +int32x4_t __arm_vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16))) +uint16x8_t __arm_vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32))) +uint32x4_t __arm_vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16))) +int16x8_t __arm_vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16))) +int16x8_t __arm_vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32))) +int32x4_t __arm_vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32))) +int32x4_t __arm_vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16))) +uint16x8_t __arm_vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16))) +uint16x8_t __arm_vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32))) +uint32x4_t __arm_vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32))) +uint32x4_t __arm_vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16))) +int16x8_t __arm_vcvtpq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32))) +int32x4_t __arm_vcvtpq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16))) +uint16x8_t __arm_vcvtpq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32))) +uint32x4_t __arm_vcvtpq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16))) +int16x8_t __arm_vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32))) +int32x4_t __arm_vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16))) +uint16x8_t __arm_vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32))) +uint32x4_t __arm_vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16))) +float16x8_t __arm_vcvtq_f16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16))) +float16x8_t __arm_vcvtq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16))) +float16x8_t __arm_vcvtq_f16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16))) +float16x8_t __arm_vcvtq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32))) +float32x4_t __arm_vcvtq_f32_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32))) +float32x4_t __arm_vcvtq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32))) +float32x4_t __arm_vcvtq_f32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32))) +float32x4_t __arm_vcvtq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16))) +float16x8_t __arm_vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16))) +float16x8_t 
__arm_vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16))) +float16x8_t __arm_vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16))) +float16x8_t __arm_vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32))) +float32x4_t __arm_vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32))) +float32x4_t __arm_vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32))) +float32x4_t __arm_vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32))) +float32x4_t __arm_vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16))) +float16x8_t __arm_vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16))) +float16x8_t __arm_vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16))) +float16x8_t __arm_vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16))) +float16x8_t __arm_vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32))) +float32x4_t __arm_vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32))) +float32x4_t __arm_vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32))) +float32x4_t __arm_vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32))) +float32x4_t __arm_vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16))) +int16x8_t __arm_vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16))) +int16x8_t __arm_vcvtq_m_n(int16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32))) +int32x4_t __arm_vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32))) +int32x4_t __arm_vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16))) +uint16x8_t __arm_vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16))) +uint16x8_t __arm_vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32))) +uint32x4_t __arm_vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32))) +uint32x4_t __arm_vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16))) +int16x8_t __arm_vcvtq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16))) +int16x8_t __arm_vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32))) +int32x4_t __arm_vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32))) +int32x4_t __arm_vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16))) +uint16x8_t __arm_vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16))) +uint16x8_t __arm_vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32))) +uint32x4_t __arm_vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32))) +uint32x4_t __arm_vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16))) +float16x8_t __arm_vcvtq_n_f16_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16))) +float16x8_t __arm_vcvtq_n(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16))) +float16x8_t __arm_vcvtq_n_f16_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16))) +float16x8_t __arm_vcvtq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32))) +float32x4_t __arm_vcvtq_n_f32_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32))) +float32x4_t __arm_vcvtq_n(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32))) +float32x4_t __arm_vcvtq_n_f32_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32))) +float32x4_t __arm_vcvtq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16))) +int16x8_t __arm_vcvtq_n_s16_f16(float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32))) +int32x4_t __arm_vcvtq_n_s32_f32(float32x4_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16))) +uint16x8_t __arm_vcvtq_n_u16_f16(float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32))) +uint32x4_t __arm_vcvtq_n_u32_f32(float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16))) +int16x8_t __arm_vcvtq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32))) +int32x4_t __arm_vcvtq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16))) +uint16x8_t __arm_vcvtq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32))) +uint32x4_t __arm_vcvtq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16))) +float16x8_t __arm_vcvtq_x_f16_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16))) +float16x8_t __arm_vcvtq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16))) +float16x8_t __arm_vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16))) +float16x8_t __arm_vcvtq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32))) +float32x4_t __arm_vcvtq_x_f32_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32))) +float32x4_t __arm_vcvtq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32))) +float32x4_t __arm_vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32))) +float32x4_t __arm_vcvtq_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16))) +float16x8_t __arm_vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16))) +float16x8_t __arm_vcvtq_x_n(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16))) +float16x8_t __arm_vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16))) +float16x8_t __arm_vcvtq_x_n(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32))) +float32x4_t __arm_vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32))) +float32x4_t __arm_vcvtq_x_n(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32))) +float32x4_t __arm_vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32))) +float32x4_t __arm_vcvtq_x_n(uint32x4_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16))) +int16x8_t __arm_vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32))) +int32x4_t __arm_vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16))) +uint16x8_t __arm_vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32))) +uint32x4_t __arm_vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16))) +int16x8_t __arm_vcvtq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32))) +int32x4_t __arm_vcvtq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16))) +uint16x8_t __arm_vcvtq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32))) +uint32x4_t __arm_vcvtq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32))) +float16x8_t __arm_vcvttq_f16_f32(float16x8_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16))) +float32x4_t __arm_vcvttq_f32_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32))) +float16x8_t __arm_vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16))) +float32x4_t __arm_vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16))) +float32x4_t __arm_vcvttq_x_f32_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16))) +float16x8_t __arm_vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16))) +float16x8_t __arm_vdupq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32))) +float32x4_t __arm_vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32))) +float32x4_t __arm_vdupq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16))) +float16x8_t __arm_vdupq_n_f16(float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32))) +float32x4_t __arm_vdupq_n_f32(float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16))) +float16x8_t __arm_vdupq_x_n_f16(float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32))) +float32x4_t __arm_vdupq_x_n_f32(float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16))) +float16x8_t __arm_veorq_f16(float16x8_t, float16x8_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16))) +float16x8_t __arm_veorq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32))) +float32x4_t __arm_veorq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32))) +float32x4_t __arm_veorq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16))) +float16x8_t __arm_veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16))) +float16x8_t __arm_veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32))) +float32x4_t __arm_veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32))) +float32x4_t __arm_veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16))) +float16x8_t __arm_veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16))) +float16x8_t __arm_veorq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32))) +float32x4_t __arm_veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32))) +float32x4_t __arm_veorq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16))) +float16x8_t __arm_vfmaq_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16))) +float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32))) +float32x4_t __arm_vfmaq_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32))) +float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16))) +float16x8_t __arm_vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16))) +float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32))) +float32x4_t __arm_vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32))) +float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16))) +float16x8_t __arm_vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16))) +float16x8_t __arm_vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32))) +float32x4_t __arm_vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32))) +float32x4_t __arm_vfmaq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16))) +float16x8_t __arm_vfmaq_n_f16(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16))) +float16x8_t __arm_vfmaq(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32))) +float32x4_t __arm_vfmaq_n_f32(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32))) +float32x4_t __arm_vfmaq(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16))) +float16x8_t __arm_vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16))) +float16x8_t __arm_vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32))) +float32x4_t __arm_vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32))) +float32x4_t __arm_vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16))) +float16x8_t __arm_vfmasq_n_f16(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16))) +float16x8_t __arm_vfmasq(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32))) +float32x4_t __arm_vfmasq_n_f32(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32))) +float32x4_t __arm_vfmasq(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16))) +float16x8_t __arm_vfmsq_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16))) +float16x8_t __arm_vfmsq(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32))) +float32x4_t __arm_vfmsq_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32))) +float32x4_t __arm_vfmsq(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16))) +float16x8_t __arm_vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16))) +float16x8_t __arm_vfmsq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32))) +float32x4_t __arm_vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32))) +float32x4_t __arm_vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16))) +float16_t __arm_vgetq_lane_f16(float16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16))) +float16_t __arm_vgetq_lane(float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32))) +float32_t __arm_vgetq_lane_f32(float32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32))) +float32_t __arm_vgetq_lane(float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16))) +float16x8_t __arm_vld1q_f16(const float16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16))) +float16x8_t __arm_vld1q(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32))) +float32x4_t __arm_vld1q_f32(const float32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32))) +float32x4_t __arm_vld1q(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16))) +float16x8_t __arm_vld1q_z_f16(const float16_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16))) +float16x8_t __arm_vld1q_z(const float16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32))) +float32x4_t __arm_vld1q_z_f32(const float32_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32))) +float32x4_t __arm_vld1q_z(const float32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16))) +float16x8x2_t __arm_vld2q_f16(const float16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16))) +float16x8x2_t __arm_vld2q(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32))) +float32x4x2_t __arm_vld2q_f32(const float32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32))) +float32x4x2_t __arm_vld2q(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16))) +float16x8x4_t __arm_vld4q_f16(const float16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16))) +float16x8x4_t __arm_vld4q(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32))) +float32x4x4_t __arm_vld4q_f32(const float32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32))) +float32x4x4_t __arm_vld4q(const 
float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16))) +float16x8_t __arm_vldrhq_f16(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16))) +float16x8_t __arm_vldrhq_gather_offset_f16(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16))) +float16x8_t __arm_vldrhq_gather_offset(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16))) +float16x8_t __arm_vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16))) +float16x8_t __arm_vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16))) +float16x8_t __arm_vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16))) +float16x8_t __arm_vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16))) +float16x8_t __arm_vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16))) +float16x8_t __arm_vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16))) +float16x8_t __arm_vldrhq_z_f16(const float16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32))) +float32x4_t __arm_vldrwq_f32(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32))) +float32x4_t __arm_vldrwq_gather_base_f32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32))) +float32x4_t __arm_vldrwq_gather_base_wb_f32(uint32x4_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32))) +float32x4_t __arm_vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32))) +float32x4_t __arm_vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32))) +float32x4_t __arm_vldrwq_gather_offset_f32(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32))) +float32x4_t __arm_vldrwq_gather_offset(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32))) +float32x4_t __arm_vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32))) +float32x4_t 
__arm_vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32))) +float32x4_t __arm_vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32))) +float32x4_t __arm_vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32))) +float32x4_t __arm_vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32))) +float32x4_t __arm_vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32))) +float32x4_t __arm_vldrwq_z_f32(const float32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16))) +float16x8_t __arm_vmaxnmaq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16))) +float16x8_t __arm_vmaxnmaq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32))) +float32x4_t __arm_vmaxnmaq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32))) +float32x4_t __arm_vmaxnmaq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16))) +float16x8_t __arm_vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16))) +float16x8_t __arm_vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32))) +float32x4_t __arm_vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32))) +float32x4_t __arm_vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16))) +float16_t __arm_vmaxnmavq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16))) +float16_t __arm_vmaxnmavq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32))) +float32_t __arm_vmaxnmavq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32))) +float32_t __arm_vmaxnmavq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16))) +float16_t __arm_vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16))) +float16_t __arm_vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32))) +float32_t 
__arm_vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32))) +float32_t __arm_vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16))) +float16x8_t __arm_vmaxnmq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16))) +float16x8_t __arm_vmaxnmq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32))) +float32x4_t __arm_vmaxnmq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32))) +float32x4_t __arm_vmaxnmq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16))) +float16x8_t __arm_vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16))) +float16x8_t __arm_vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32))) +float32x4_t __arm_vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32))) +float32x4_t __arm_vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16))) +float16x8_t __arm_vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16))) +float16x8_t __arm_vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32))) +float32x4_t __arm_vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32))) +float32x4_t __arm_vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16))) +float16_t __arm_vmaxnmvq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16))) +float16_t __arm_vmaxnmvq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32))) +float32_t __arm_vmaxnmvq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32))) +float32_t __arm_vmaxnmvq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16))) +float16_t __arm_vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16))) +float16_t __arm_vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32))) +float32_t __arm_vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32))) +float32_t __arm_vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16))) +float16x8_t __arm_vminnmaq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16))) +float16x8_t __arm_vminnmaq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32))) +float32x4_t __arm_vminnmaq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32))) +float32x4_t __arm_vminnmaq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16))) +float16x8_t __arm_vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16))) +float16x8_t __arm_vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32))) +float32x4_t __arm_vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32))) +float32x4_t __arm_vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16))) +float16_t __arm_vminnmavq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16))) +float16_t __arm_vminnmavq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32))) +float32_t __arm_vminnmavq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32))) +float32_t __arm_vminnmavq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16))) +float16_t __arm_vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16))) +float16_t __arm_vminnmavq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32))) +float32_t __arm_vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32))) +float32_t __arm_vminnmavq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16))) +float16x8_t __arm_vminnmq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16))) +float16x8_t __arm_vminnmq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32))) +float32x4_t __arm_vminnmq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32))) +float32x4_t __arm_vminnmq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16))) 
+float16x8_t __arm_vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16))) +float16x8_t __arm_vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32))) +float32x4_t __arm_vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32))) +float32x4_t __arm_vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16))) +float16x8_t __arm_vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16))) +float16x8_t __arm_vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32))) +float32x4_t __arm_vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32))) +float32x4_t __arm_vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16))) +float16_t __arm_vminnmvq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16))) +float16_t __arm_vminnmvq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32))) +float32_t __arm_vminnmvq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32))) +float32_t __arm_vminnmvq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16))) +float16_t __arm_vminnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16))) +float16_t __arm_vminnmvq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32))) +float32_t __arm_vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32))) +float32_t __arm_vminnmvq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16))) +float16x8_t __arm_vmulq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16))) +float16x8_t __arm_vmulq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32))) +float32x4_t __arm_vmulq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32))) +float32x4_t __arm_vmulq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16))) +float16x8_t __arm_vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16))) +float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32))) +float32x4_t __arm_vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32))) +float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16))) +float16x8_t __arm_vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16))) +float16x8_t __arm_vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32))) +float32x4_t __arm_vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32))) +float32x4_t __arm_vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16))) +float16x8_t __arm_vmulq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16))) +float16x8_t __arm_vmulq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32))) +float32x4_t __arm_vmulq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32))) +float32x4_t __arm_vmulq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16))) +float16x8_t __arm_vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16))) +float16x8_t __arm_vmulq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32))) +float32x4_t __arm_vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32))) +float32x4_t __arm_vmulq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16))) +float16x8_t __arm_vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16))) +float16x8_t __arm_vmulq_x(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32))) +float32x4_t __arm_vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32))) +float32x4_t __arm_vmulq_x(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16))) +float16x8_t __arm_vnegq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16))) +float16x8_t __arm_vnegq(float16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32))) +float32x4_t __arm_vnegq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32))) +float32x4_t __arm_vnegq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16))) +float16x8_t __arm_vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16))) +float16x8_t __arm_vnegq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32))) +float32x4_t __arm_vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32))) +float32x4_t __arm_vnegq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16))) +float16x8_t __arm_vnegq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16))) +float16x8_t __arm_vnegq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32))) +float32x4_t __arm_vnegq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32))) +float32x4_t __arm_vnegq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16))) +float16x8_t __arm_vornq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16))) +float16x8_t __arm_vornq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32))) +float32x4_t __arm_vornq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32))) +float32x4_t __arm_vornq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16))) +float16x8_t __arm_vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16))) +float16x8_t __arm_vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32))) +float32x4_t __arm_vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32))) +float32x4_t __arm_vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16))) +float16x8_t __arm_vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16))) +float16x8_t __arm_vornq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32))) +float32x4_t __arm_vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32))) +float32x4_t __arm_vornq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16))) +float16x8_t __arm_vorrq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16))) +float16x8_t __arm_vorrq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32))) +float32x4_t __arm_vorrq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32))) +float32x4_t __arm_vorrq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16))) +float16x8_t __arm_vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16))) +float16x8_t __arm_vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32))) +float32x4_t __arm_vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32))) +float32x4_t __arm_vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16))) +float16x8_t __arm_vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16))) +float16x8_t __arm_vorrq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32))) +float32x4_t __arm_vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32))) +float32x4_t __arm_vorrq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16))) +float16x8_t __arm_vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16))) +float16x8_t __arm_vpselq(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32))) +float32x4_t __arm_vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32))) +float32x4_t __arm_vpselq(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32))) +float16x8_t __arm_vreinterpretq_f16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32))) +float16x8_t __arm_vreinterpretq_f16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16))) +float16x8_t __arm_vreinterpretq_f16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16))) +float16x8_t __arm_vreinterpretq_f16(int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32))) +float16x8_t __arm_vreinterpretq_f16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32))) +float16x8_t __arm_vreinterpretq_f16(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64))) +float16x8_t __arm_vreinterpretq_f16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64))) +float16x8_t __arm_vreinterpretq_f16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8))) +float16x8_t __arm_vreinterpretq_f16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8))) +float16x8_t __arm_vreinterpretq_f16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16))) +float16x8_t __arm_vreinterpretq_f16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16))) +float16x8_t __arm_vreinterpretq_f16(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32))) +float16x8_t __arm_vreinterpretq_f16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32))) +float16x8_t __arm_vreinterpretq_f16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64))) +float16x8_t __arm_vreinterpretq_f16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64))) +float16x8_t __arm_vreinterpretq_f16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t __arm_vreinterpretq_f16(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16))) +float32x4_t __arm_vreinterpretq_f32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16))) +float32x4_t __arm_vreinterpretq_f32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16))) +float32x4_t __arm_vreinterpretq_f32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16))) +float32x4_t __arm_vreinterpretq_f32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32))) +float32x4_t __arm_vreinterpretq_f32_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32))) +float32x4_t __arm_vreinterpretq_f32(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64))) +float32x4_t __arm_vreinterpretq_f32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64))) +float32x4_t 
__arm_vreinterpretq_f32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8))) +float32x4_t __arm_vreinterpretq_f32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8))) +float32x4_t __arm_vreinterpretq_f32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16))) +float32x4_t __arm_vreinterpretq_f32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16))) +float32x4_t __arm_vreinterpretq_f32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32))) +float32x4_t __arm_vreinterpretq_f32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32))) +float32x4_t __arm_vreinterpretq_f32(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64))) +float32x4_t __arm_vreinterpretq_f32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64))) +float32x4_t __arm_vreinterpretq_f32(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t __arm_vreinterpretq_f32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16))) +int16x8_t __arm_vreinterpretq_s16_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16))) +int16x8_t __arm_vreinterpretq_s16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32))) +int16x8_t __arm_vreinterpretq_s16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32))) +int16x8_t __arm_vreinterpretq_s16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16))) +int32x4_t __arm_vreinterpretq_s32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16))) +int32x4_t __arm_vreinterpretq_s32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32))) +int32x4_t __arm_vreinterpretq_s32_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32))) +int32x4_t __arm_vreinterpretq_s32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16))) +int64x2_t __arm_vreinterpretq_s64_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16))) +int64x2_t __arm_vreinterpretq_s64(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32))) +int64x2_t __arm_vreinterpretq_s64_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32))) +int64x2_t __arm_vreinterpretq_s64(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16))) +int8x16_t __arm_vreinterpretq_s8_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16))) +int8x16_t __arm_vreinterpretq_s8(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32))) +int8x16_t __arm_vreinterpretq_s8_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32))) +int8x16_t __arm_vreinterpretq_s8(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16))) +uint16x8_t __arm_vreinterpretq_u16_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16))) +uint16x8_t __arm_vreinterpretq_u16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32))) +uint16x8_t __arm_vreinterpretq_u16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32))) +uint16x8_t __arm_vreinterpretq_u16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16))) +uint32x4_t __arm_vreinterpretq_u32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16))) +uint32x4_t __arm_vreinterpretq_u32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32))) +uint32x4_t __arm_vreinterpretq_u32_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32))) +uint32x4_t __arm_vreinterpretq_u32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16))) +uint64x2_t __arm_vreinterpretq_u64_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16))) +uint64x2_t __arm_vreinterpretq_u64(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32))) +uint64x2_t __arm_vreinterpretq_u64_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32))) +uint64x2_t __arm_vreinterpretq_u64(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t __arm_vreinterpretq_u8_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t __arm_vreinterpretq_u8(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t __arm_vreinterpretq_u8_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t __arm_vreinterpretq_u8(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16))) +float16x8_t __arm_vrev32q_f16(float16x8_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16))) +float16x8_t __arm_vrev32q(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16))) +float16x8_t __arm_vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16))) +float16x8_t __arm_vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16))) +float16x8_t __arm_vrev32q_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16))) +float16x8_t __arm_vrev32q_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16))) +float16x8_t __arm_vrev64q_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16))) +float16x8_t __arm_vrev64q(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32))) +float32x4_t __arm_vrev64q_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32))) +float32x4_t __arm_vrev64q(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16))) +float16x8_t __arm_vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16))) +float16x8_t __arm_vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32))) +float32x4_t __arm_vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32))) +float32x4_t __arm_vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16))) +float16x8_t __arm_vrev64q_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16))) +float16x8_t __arm_vrev64q_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32))) +float32x4_t __arm_vrev64q_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32))) +float32x4_t __arm_vrev64q_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16))) +float16x8_t __arm_vrndaq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16))) +float16x8_t __arm_vrndaq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32))) +float32x4_t __arm_vrndaq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32))) +float32x4_t __arm_vrndaq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16))) +float16x8_t __arm_vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16))) +float16x8_t __arm_vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32))) +float32x4_t __arm_vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32))) +float32x4_t __arm_vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16))) +float16x8_t __arm_vrndaq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16))) +float16x8_t __arm_vrndaq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32))) +float32x4_t __arm_vrndaq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32))) +float32x4_t __arm_vrndaq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16))) +float16x8_t __arm_vrndmq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16))) +float16x8_t __arm_vrndmq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32))) +float32x4_t __arm_vrndmq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32))) +float32x4_t __arm_vrndmq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16))) +float16x8_t __arm_vrndmq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16))) +float16x8_t __arm_vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32))) +float32x4_t __arm_vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32))) +float32x4_t __arm_vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16))) +float16x8_t __arm_vrndmq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16))) +float16x8_t __arm_vrndmq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32))) +float32x4_t __arm_vrndmq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32))) +float32x4_t __arm_vrndmq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16))) +float16x8_t __arm_vrndnq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16))) +float16x8_t __arm_vrndnq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32))) +float32x4_t __arm_vrndnq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32))) +float32x4_t __arm_vrndnq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16))) +float16x8_t __arm_vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16))) +float16x8_t __arm_vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32))) +float32x4_t __arm_vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32))) +float32x4_t __arm_vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16))) +float16x8_t __arm_vrndnq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16))) +float16x8_t __arm_vrndnq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32))) +float32x4_t __arm_vrndnq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32))) +float32x4_t __arm_vrndnq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16))) +float16x8_t __arm_vrndpq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16))) +float16x8_t __arm_vrndpq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32))) +float32x4_t __arm_vrndpq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32))) +float32x4_t __arm_vrndpq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16))) +float16x8_t __arm_vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16))) +float16x8_t __arm_vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32))) +float32x4_t __arm_vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32))) +float32x4_t __arm_vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16))) +float16x8_t __arm_vrndpq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16))) +float16x8_t __arm_vrndpq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32))) +float32x4_t __arm_vrndpq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32))) +float32x4_t __arm_vrndpq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16))) +float16x8_t __arm_vrndq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16))) +float16x8_t __arm_vrndq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32))) +float32x4_t __arm_vrndq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32))) +float32x4_t __arm_vrndq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16))) +float16x8_t __arm_vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16))) +float16x8_t __arm_vrndq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32))) +float32x4_t __arm_vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32))) +float32x4_t __arm_vrndq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16))) +float16x8_t __arm_vrndq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16))) +float16x8_t __arm_vrndq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32))) +float32x4_t __arm_vrndq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32))) +float32x4_t __arm_vrndq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16))) +float16x8_t __arm_vrndxq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16))) +float16x8_t __arm_vrndxq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32))) +float32x4_t __arm_vrndxq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32))) +float32x4_t __arm_vrndxq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16))) +float16x8_t __arm_vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16))) +float16x8_t __arm_vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32))) +float32x4_t __arm_vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32))) +float32x4_t __arm_vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16))) +float16x8_t __arm_vrndxq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16))) +float16x8_t __arm_vrndxq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32))) +float32x4_t __arm_vrndxq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32))) +float32x4_t __arm_vrndxq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16))) +float16x8_t __arm_vsetq_lane_f16(float16_t, float16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16))) +float16x8_t __arm_vsetq_lane(float16_t, float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32))) +float32x4_t __arm_vsetq_lane_f32(float32_t, float32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32))) +float32x4_t __arm_vsetq_lane(float32_t, float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16))) +void __arm_vst1q_f16(float16_t *, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16))) +void __arm_vst1q(float16_t *, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32))) +void __arm_vst1q_f32(float32_t *, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32))) +void __arm_vst1q(float32_t *, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16))) +void __arm_vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16))) +void __arm_vst1q_p(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32))) +void __arm_vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32))) +void __arm_vst1q_p(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16))) +void __arm_vst2q_f16(float16_t *, float16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16))) +void __arm_vst2q(float16_t *, float16x8x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32))) +void __arm_vst2q_f32(float32_t *, float32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32))) +void __arm_vst2q(float32_t *, float32x4x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16))) +void __arm_vst4q_f16(float16_t *, float16x8x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16))) +void __arm_vst4q(float16_t *, float16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32))) +void __arm_vst4q_f32(float32_t *, float32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32))) +void __arm_vst4q(float32_t *, float32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16))) +void __arm_vstrhq_f16(float16_t *, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16))) +void __arm_vstrhq(float16_t *, float16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16))) +void __arm_vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16))) +void __arm_vstrhq_p(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16))) +void __arm_vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16))) +void __arm_vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16))) +void __arm_vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16))) +void __arm_vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16))) +void __arm_vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16))) +void __arm_vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16))) +void __arm_vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16))) +void __arm_vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32))) +void __arm_vstrwq_f32(float32_t *, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32))) +void __arm_vstrwq(float32_t *, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32))) +void __arm_vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32))) +void __arm_vstrwq_p(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32))) +void __arm_vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32))) +void __arm_vstrwq_scatter_base(uint32x4_t, int, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32))) +void __arm_vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32))) +void __arm_vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32))) +void __arm_vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32))) +void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32))) +void __arm_vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32))) +void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32))) +void __arm_vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32))) +void __arm_vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32))) +void __arm_vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32))) +void __arm_vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32))) +void __arm_vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32))) +void __arm_vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32))) +void __arm_vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32))) +void __arm_vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16))) +float16x8_t __arm_vsubq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16))) +float16x8_t __arm_vsubq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32))) +float32x4_t __arm_vsubq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32))) +float32x4_t __arm_vsubq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16))) +float16x8_t __arm_vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16))) +float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32))) +float32x4_t __arm_vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32))) +float32x4_t 
__arm_vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16))) +float16x8_t __arm_vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16))) +float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32))) +float32x4_t __arm_vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32))) +float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16))) +float16x8_t __arm_vsubq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16))) +float16x8_t __arm_vsubq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32))) +float32x4_t __arm_vsubq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32))) +float32x4_t __arm_vsubq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16))) +float16x8_t __arm_vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16))) +float16x8_t __arm_vsubq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32))) +float32x4_t __arm_vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32))) +float32x4_t __arm_vsubq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16))) +float16x8_t __arm_vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16))) +float16x8_t __arm_vsubq_x(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32))) +float32x4_t __arm_vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32))) +float32x4_t __arm_vsubq_x(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16))) +float16x8_t __arm_vuninitializedq_f16(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32))) +float32x4_t __arm_vuninitializedq_f32(); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16))) +float16x8_t __arm_vuninitializedq(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32))) +float32x4_t __arm_vuninitializedq(float32x4_t); + +#endif /* (__ARM_FEATURE_MVE & 2) */ + +#if (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) + 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_asrl))) +int64_t asrl(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_lsll))) +uint64_t lsll(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshr))) +int32_t sqrshr(int32_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl))) +int64_t sqrshrl(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqrshrl_sat48))) +int64_t sqrshrl_sat48(int64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshl))) +int32_t sqshl(int32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_sqshll))) +int64_t sqshll(int64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshr))) +int32_t srshr(int32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_srshrl))) +int64_t srshrl(int64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshl))) +uint32_t uqrshl(uint32_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll))) +uint64_t uqrshll(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqrshll_sat48))) +uint64_t uqrshll_sat48(uint64_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshl))) +uint32_t uqshl(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_uqshll))) +uint64_t uqshll(uint64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshr))) +uint32_t urshr(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_urshrl))) +uint64_t urshrl(uint64_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16))) +uint32_t vabavq_p_s16(uint32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s16))) +uint32_t vabavq_p(uint32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32))) +uint32_t vabavq_p_s32(uint32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s32))) +uint32_t vabavq_p(uint32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8))) +uint32_t vabavq_p_s8(uint32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_s8))) +uint32_t vabavq_p(uint32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16))) +uint32_t vabavq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u16))) +uint32_t vabavq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32))) +uint32_t vabavq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u32))) +uint32_t vabavq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8))) +uint32_t vabavq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_p_u8))) +uint32_t vabavq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16))) +uint32_t vabavq_s16(uint32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s16))) +uint32_t vabavq(uint32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32))) +uint32_t vabavq_s32(uint32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s32))) +uint32_t vabavq(uint32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8))) +uint32_t vabavq_s8(uint32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_s8))) +uint32_t vabavq(uint32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16))) +uint32_t vabavq_u16(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u16))) +uint32_t vabavq(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32))) +uint32_t vabavq_u32(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u32))) +uint32_t vabavq(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8))) +uint32_t vabavq_u8(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabavq_u8))) +uint32_t vabavq(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16))) +int16x8_t vabdq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s16))) +int16x8_t vabdq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32))) +int32x4_t vabdq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s32))) +int32x4_t vabdq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8))) +int8x16_t vabdq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_s8))) +int8x16_t vabdq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16))) +uint16x8_t vabdq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u16))) +uint16x8_t vabdq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32))) +uint32x4_t vabdq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u32))) +uint32x4_t vabdq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8))) +uint8x16_t vabdq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_u8))) +uint8x16_t vabdq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16))) +int16x8_t vabdq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s16))) +int16x8_t vabdq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32))) +int32x4_t vabdq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s32))) +int32x4_t vabdq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8))) +int8x16_t vabdq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_s8))) +int8x16_t vabdq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16))) +uint16x8_t vabdq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u16))) +uint16x8_t vabdq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32))) +uint32x4_t vabdq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u32))) +uint32x4_t vabdq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8))) +uint8x16_t vabdq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_u8))) +uint8x16_t vabdq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16))) +int16x8_t vabdq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s16))) +int16x8_t vabdq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32))) +int32x4_t vabdq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s32))) +int32x4_t vabdq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8))) +int8x16_t vabdq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_s8))) +int8x16_t vabdq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16))) +uint16x8_t vabdq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u16))) +uint16x8_t vabdq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32))) +uint32x4_t vabdq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u32))) +uint32x4_t vabdq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8))) +uint8x16_t vabdq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_u8))) +uint8x16_t vabdq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16))) +int16x8_t vabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s16))) +int16x8_t vabsq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32))) +int32x4_t vabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s32))) +int32x4_t vabsq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8))) +int8x16_t vabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_s8))) +int8x16_t vabsq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16))) +int16x8_t vabsq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s16))) +int16x8_t vabsq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32))) +int32x4_t vabsq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s32))) +int32x4_t vabsq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8))) +int8x16_t vabsq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_s8))) +int8x16_t vabsq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16))) +int16x8_t vabsq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s16))) +int16x8_t vabsq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32))) +int32x4_t vabsq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s32))) +int32x4_t vabsq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8))) +int8x16_t vabsq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_s8))) +int8x16_t 
vabsq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32))) +int32x4_t vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_s32))) +int32x4_t vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32))) +uint32x4_t vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_m_u32))) +uint32x4_t vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32))) +int32x4_t vadciq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_s32))) +int32x4_t vadciq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32))) +uint32x4_t vadciq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadciq_u32))) +uint32x4_t vadciq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32))) +int32x4_t vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_s32))) +int32x4_t vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32))) +uint32x4_t vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_m_u32))) +uint32x4_t vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32))) +int32x4_t vadcq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_s32))) +int32x4_t vadcq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32))) +uint32x4_t vadcq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vadcq_u32))) +uint32x4_t vadcq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32))) +int64_t vaddlvaq_p_s32(int64_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_s32))) +int64_t vaddlvaq_p(int64_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32))) +uint64_t vaddlvaq_p_u32(uint64_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_p_u32))) +uint64_t vaddlvaq_p(uint64_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32))) +int64_t vaddlvaq_s32(int64_t, 
int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_s32))) +int64_t vaddlvaq(int64_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32))) +uint64_t vaddlvaq_u32(uint64_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvaq_u32))) +uint64_t vaddlvaq(uint64_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32))) +int64_t vaddlvq_p_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_s32))) +int64_t vaddlvq_p(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32))) +uint64_t vaddlvq_p_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_p_u32))) +uint64_t vaddlvq_p(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32))) +int64_t vaddlvq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_s32))) +int64_t vaddlvq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32))) +uint64_t vaddlvq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddlvq_u32))) +uint64_t vaddlvq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16))) +int16x8_t vaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s16))) +int16x8_t vaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32))) +int32x4_t vaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s32))) +int32x4_t vaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8))) +int8x16_t vaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_s8))) +int8x16_t vaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16))) +uint16x8_t vaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u16))) +uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32))) +uint32x4_t vaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u32))) +uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8))) +uint8x16_t vaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_u8))) +uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16))) +int16x8_t vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s16))) +int16x8_t vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32))) +int32x4_t vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s32))) +int32x4_t vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8))) +int8x16_t vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_s8))) +int8x16_t vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16))) +uint16x8_t vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u16))) +uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32))) +uint32x4_t vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u32))) +uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8))) +uint8x16_t vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_u8))) +uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16))) +int16x8_t vaddq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s16))) +int16x8_t vaddq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32))) +int32x4_t vaddq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s32))) +int32x4_t vaddq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8))) +int8x16_t vaddq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_s8))) +int8x16_t vaddq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16))) +uint16x8_t vaddq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u16))) +uint16x8_t vaddq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32))) +uint32x4_t vaddq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u32))) +uint32x4_t vaddq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8))) +uint8x16_t vaddq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_u8))) +uint8x16_t vaddq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16))) +int16x8_t vaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s16))) +int16x8_t vaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32))) +int32x4_t vaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s32))) +int32x4_t vaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8))) +int8x16_t vaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_s8))) +int8x16_t vaddq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16))) +uint16x8_t vaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u16))) +uint16x8_t vaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32))) +uint32x4_t vaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u32))) +uint32x4_t vaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8))) +uint8x16_t vaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_u8))) +uint8x16_t vaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16))) +int16x8_t vaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s16))) +int16x8_t vaddq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32))) +int32x4_t vaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s32))) +int32x4_t vaddq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8))) +int8x16_t vaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_s8))) +int8x16_t vaddq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16))) +uint16x8_t vaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u16))) +uint16x8_t vaddq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32))) +uint32x4_t vaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u32))) +uint32x4_t vaddq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8))) +uint8x16_t vaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_u8))) +uint8x16_t vaddq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16))) +int16x8_t vaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s16))) +int16x8_t vaddq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32))) +int32x4_t vaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s32))) +int32x4_t vaddq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8))) +int8x16_t vaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_s8))) +int8x16_t vaddq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16))) +uint16x8_t vaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u16))) +uint16x8_t vaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32))) +uint32x4_t vaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u32))) +uint32x4_t vaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8))) +uint8x16_t vaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_u8))) +uint8x16_t vaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16))) +int32_t vaddvaq_p_s16(int32_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s16))) +int32_t vaddvaq_p(int32_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32))) +int32_t vaddvaq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s32))) +int32_t vaddvaq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8))) +int32_t vaddvaq_p_s8(int32_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_s8))) +int32_t vaddvaq_p(int32_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16))) +uint32_t vaddvaq_p_u16(uint32_t, uint16x8_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u16))) +uint32_t vaddvaq_p(uint32_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32))) +uint32_t vaddvaq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u32))) +uint32_t vaddvaq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8))) +uint32_t vaddvaq_p_u8(uint32_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_p_u8))) +uint32_t vaddvaq_p(uint32_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16))) +int32_t vaddvaq_s16(int32_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s16))) +int32_t vaddvaq(int32_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32))) +int32_t vaddvaq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s32))) +int32_t vaddvaq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8))) +int32_t vaddvaq_s8(int32_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_s8))) +int32_t vaddvaq(int32_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16))) +uint32_t vaddvaq_u16(uint32_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u16))) +uint32_t vaddvaq(uint32_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32))) +uint32_t vaddvaq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u32))) +uint32_t vaddvaq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8))) +uint32_t vaddvaq_u8(uint32_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvaq_u8))) +uint32_t vaddvaq(uint32_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16))) +int32_t vaddvq_p_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s16))) +int32_t vaddvq_p(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32))) +int32_t vaddvq_p_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s32))) +int32_t vaddvq_p(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8))) +int32_t vaddvq_p_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_s8))) +int32_t vaddvq_p(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16))) +uint32_t vaddvq_p_u16(uint16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u16))) +uint32_t vaddvq_p(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32))) +uint32_t vaddvq_p_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u32))) +uint32_t vaddvq_p(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8))) +uint32_t vaddvq_p_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_p_u8))) +uint32_t vaddvq_p(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16))) +int32_t vaddvq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s16))) +int32_t vaddvq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32))) +int32_t vaddvq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s32))) +int32_t vaddvq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8))) +int32_t vaddvq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_s8))) +int32_t vaddvq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16))) +uint32_t vaddvq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u16))) +uint32_t vaddvq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32))) +uint32_t vaddvq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u32))) +uint32_t vaddvq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8))) +uint32_t vaddvq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddvq_u8))) +uint32_t vaddvq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16))) +int16x8_t vandq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s16))) +int16x8_t vandq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32))) +int32x4_t vandq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s32))) +int32x4_t vandq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8))) +int8x16_t vandq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_s8))) +int8x16_t vandq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16))) +uint16x8_t vandq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u16))) +uint16x8_t vandq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32))) +uint32x4_t vandq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u32))) +uint32x4_t vandq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8))) +uint8x16_t vandq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_u8))) +uint8x16_t vandq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16))) +int16x8_t vandq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s16))) +int16x8_t vandq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32))) +int32x4_t vandq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s32))) +int32x4_t vandq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8))) +int8x16_t vandq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_s8))) +int8x16_t vandq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16))) +uint16x8_t vandq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u16))) +uint16x8_t vandq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32))) +uint32x4_t vandq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u32))) +uint32x4_t vandq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8))) +uint8x16_t vandq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_u8))) +uint8x16_t vandq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16))) +int16x8_t vandq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s16))) +int16x8_t vandq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32))) +int32x4_t vandq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s32))) +int32x4_t vandq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8))) +int8x16_t vandq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_s8))) +int8x16_t vandq_x(int8x16_t, int8x16_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16))) +uint16x8_t vandq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u16))) +uint16x8_t vandq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32))) +uint32x4_t vandq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u32))) +uint32x4_t vandq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8))) +uint8x16_t vandq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_u8))) +uint8x16_t vandq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16))) +int16x8_t vbicq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s16))) +int16x8_t vbicq_m_n(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32))) +int32x4_t vbicq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_s32))) +int32x4_t vbicq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16))) +uint16x8_t vbicq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u16))) +uint16x8_t vbicq_m_n(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32))) +uint32x4_t vbicq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_n_u32))) +uint32x4_t vbicq_m_n(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16))) +int16x8_t vbicq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s16))) +int16x8_t vbicq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32))) +int32x4_t vbicq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s32))) +int32x4_t vbicq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8))) +int8x16_t vbicq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_s8))) +int8x16_t vbicq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16))) +uint16x8_t vbicq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u16))) +uint16x8_t vbicq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32))) +uint32x4_t vbicq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u32))) +uint32x4_t vbicq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8))) +uint8x16_t vbicq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_u8))) +uint8x16_t vbicq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16))) +int16x8_t vbicq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s16))) +int16x8_t vbicq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32))) +int32x4_t vbicq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_s32))) +int32x4_t vbicq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16))) +uint16x8_t vbicq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u16))) +uint16x8_t vbicq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32))) +uint32x4_t vbicq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_n_u32))) +uint32x4_t vbicq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16))) +int16x8_t vbicq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s16))) +int16x8_t vbicq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32))) +int32x4_t vbicq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s32))) +int32x4_t vbicq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8))) +int8x16_t vbicq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_s8))) +int8x16_t vbicq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16))) +uint16x8_t vbicq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u16))) +uint16x8_t vbicq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32))) +uint32x4_t vbicq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u32))) +uint32x4_t vbicq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8))) +uint8x16_t vbicq_u8(uint8x16_t, uint8x16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_u8))) +uint8x16_t vbicq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16))) +int16x8_t vbicq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s16))) +int16x8_t vbicq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32))) +int32x4_t vbicq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s32))) +int32x4_t vbicq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8))) +int8x16_t vbicq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_s8))) +int8x16_t vbicq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16))) +uint16x8_t vbicq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u16))) +uint16x8_t vbicq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32))) +uint32x4_t vbicq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u32))) +uint32x4_t vbicq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8))) +uint8x16_t vbicq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_u8))) +uint8x16_t vbicq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16))) +int16x8_t vbrsrq_m_n_s16(int16x8_t, int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s16))) +int16x8_t vbrsrq_m(int16x8_t, int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32))) +int32x4_t vbrsrq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s32))) +int32x4_t vbrsrq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8))) +int8x16_t vbrsrq_m_n_s8(int8x16_t, int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_s8))) +int8x16_t vbrsrq_m(int8x16_t, int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16))) +uint16x8_t vbrsrq_m_n_u16(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u16))) +uint16x8_t vbrsrq_m(uint16x8_t, uint16x8_t, int32_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32))) +uint32x4_t vbrsrq_m_n_u32(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u32))) +uint32x4_t vbrsrq_m(uint32x4_t, uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8))) +uint8x16_t vbrsrq_m_n_u8(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_u8))) +uint8x16_t vbrsrq_m(uint8x16_t, uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16))) +int16x8_t vbrsrq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s16))) +int16x8_t vbrsrq(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32))) +int32x4_t vbrsrq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s32))) +int32x4_t vbrsrq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8))) +int8x16_t vbrsrq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_s8))) +int8x16_t vbrsrq(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16))) +uint16x8_t vbrsrq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u16))) +uint16x8_t vbrsrq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32))) +uint32x4_t vbrsrq_n_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u32))) +uint32x4_t vbrsrq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8))) +uint8x16_t vbrsrq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_u8))) +uint8x16_t vbrsrq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16))) +int16x8_t vbrsrq_x_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s16))) +int16x8_t vbrsrq_x(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32))) +int32x4_t vbrsrq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s32))) +int32x4_t vbrsrq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8))) +int8x16_t vbrsrq_x_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_s8))) +int8x16_t vbrsrq_x(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16))) +uint16x8_t vbrsrq_x_n_u16(uint16x8_t, int32_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u16))) +uint16x8_t vbrsrq_x(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32))) +uint32x4_t vbrsrq_x_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u32))) +uint32x4_t vbrsrq_x(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8))) +uint8x16_t vbrsrq_x_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_u8))) +uint8x16_t vbrsrq_x(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16))) +int16x8_t vcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s16))) +int16x8_t vcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32))) +int32x4_t vcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s32))) +int32x4_t vcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8))) +int8x16_t vcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_s8))) +int8x16_t vcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16))) +uint16x8_t vcaddq_rot270_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u16))) +uint16x8_t vcaddq_rot270_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32))) +uint32x4_t vcaddq_rot270_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u32))) +uint32x4_t vcaddq_rot270_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8))) +uint8x16_t vcaddq_rot270_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_u8))) +uint8x16_t vcaddq_rot270_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16))) +int16x8_t vcaddq_rot270_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s16))) +int16x8_t vcaddq_rot270(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32))) +int32x4_t 
vcaddq_rot270_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s32))) +int32x4_t vcaddq_rot270(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8))) +int8x16_t vcaddq_rot270_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_s8))) +int8x16_t vcaddq_rot270(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16))) +uint16x8_t vcaddq_rot270_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u16))) +uint16x8_t vcaddq_rot270(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32))) +uint32x4_t vcaddq_rot270_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u32))) +uint32x4_t vcaddq_rot270(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8))) +uint8x16_t vcaddq_rot270_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_u8))) +uint8x16_t vcaddq_rot270(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16))) +int16x8_t vcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s16))) +int16x8_t vcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32))) +int32x4_t vcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s32))) +int32x4_t vcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8))) +int8x16_t vcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_s8))) +int8x16_t vcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16))) +uint16x8_t vcaddq_rot270_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u16))) +uint16x8_t vcaddq_rot270_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32))) +uint32x4_t vcaddq_rot270_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u32))) +uint32x4_t vcaddq_rot270_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8))) +uint8x16_t vcaddq_rot270_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_u8))) +uint8x16_t 
vcaddq_rot270_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16))) +int16x8_t vcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s16))) +int16x8_t vcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32))) +int32x4_t vcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s32))) +int32x4_t vcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8))) +int8x16_t vcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_s8))) +int8x16_t vcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16))) +uint16x8_t vcaddq_rot90_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u16))) +uint16x8_t vcaddq_rot90_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32))) +uint32x4_t vcaddq_rot90_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u32))) +uint32x4_t vcaddq_rot90_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8))) +uint8x16_t vcaddq_rot90_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_u8))) +uint8x16_t vcaddq_rot90_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16))) +int16x8_t vcaddq_rot90_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s16))) +int16x8_t vcaddq_rot90(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32))) +int32x4_t vcaddq_rot90_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s32))) +int32x4_t vcaddq_rot90(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8))) +int8x16_t vcaddq_rot90_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_s8))) +int8x16_t vcaddq_rot90(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16))) +uint16x8_t vcaddq_rot90_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u16))) +uint16x8_t vcaddq_rot90(uint16x8_t, uint16x8_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32))) +uint32x4_t vcaddq_rot90_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u32))) +uint32x4_t vcaddq_rot90(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8))) +uint8x16_t vcaddq_rot90_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_u8))) +uint8x16_t vcaddq_rot90(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16))) +int16x8_t vcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s16))) +int16x8_t vcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32))) +int32x4_t vcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s32))) +int32x4_t vcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8))) +int8x16_t vcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_s8))) +int8x16_t vcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16))) +uint16x8_t vcaddq_rot90_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u16))) +uint16x8_t vcaddq_rot90_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32))) +uint32x4_t vcaddq_rot90_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u32))) +uint32x4_t vcaddq_rot90_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8))) +uint8x16_t vcaddq_rot90_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_u8))) +uint8x16_t vcaddq_rot90_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16))) +int16x8_t vclsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s16))) +int16x8_t vclsq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32))) +int32x4_t vclsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s32))) +int32x4_t vclsq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8))) +int8x16_t vclsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_m_s8))) +int8x16_t vclsq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16))) +int16x8_t vclsq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s16))) +int16x8_t vclsq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32))) +int32x4_t vclsq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s32))) +int32x4_t vclsq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8))) +int8x16_t vclsq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_s8))) +int8x16_t vclsq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16))) +int16x8_t vclsq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s16))) +int16x8_t vclsq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32))) +int32x4_t vclsq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s32))) +int32x4_t vclsq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8))) +int8x16_t vclsq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclsq_x_s8))) +int8x16_t vclsq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16))) +int16x8_t vclzq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s16))) +int16x8_t vclzq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32))) +int32x4_t vclzq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s32))) +int32x4_t vclzq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8))) +int8x16_t vclzq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_s8))) +int8x16_t vclzq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16))) +uint16x8_t vclzq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u16))) +uint16x8_t vclzq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32))) +uint32x4_t vclzq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u32))) +uint32x4_t vclzq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8))) +uint8x16_t 
vclzq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_m_u8))) +uint8x16_t vclzq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16))) +int16x8_t vclzq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s16))) +int16x8_t vclzq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32))) +int32x4_t vclzq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s32))) +int32x4_t vclzq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8))) +int8x16_t vclzq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_s8))) +int8x16_t vclzq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16))) +uint16x8_t vclzq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u16))) +uint16x8_t vclzq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32))) +uint32x4_t vclzq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u32))) +uint32x4_t vclzq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8))) +uint8x16_t vclzq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_u8))) +uint8x16_t vclzq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16))) +int16x8_t vclzq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s16))) +int16x8_t vclzq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32))) +int32x4_t vclzq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s32))) +int32x4_t vclzq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8))) +int8x16_t vclzq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_s8))) +int8x16_t vclzq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16))) +uint16x8_t vclzq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u16))) +uint16x8_t vclzq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32))) +uint32x4_t vclzq_x_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u32))) +uint32x4_t vclzq_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8))) +uint8x16_t vclzq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vclzq_x_u8))) 
+uint8x16_t vclzq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16))) +mve_pred16_t vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u16))) +mve_pred16_t vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32))) +mve_pred16_t vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u32))) +mve_pred16_t vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8))) +mve_pred16_t vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_n_u8))) +mve_pred16_t vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16))) +mve_pred16_t vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u16))) +mve_pred16_t vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32))) +mve_pred16_t vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u32))) +mve_pred16_t vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8))) +mve_pred16_t vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_m_u8))) +mve_pred16_t vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16))) +mve_pred16_t vcmpcsq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u16))) +mve_pred16_t vcmpcsq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32))) +mve_pred16_t vcmpcsq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u32))) +mve_pred16_t vcmpcsq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8))) +mve_pred16_t vcmpcsq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_n_u8))) +mve_pred16_t vcmpcsq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16))) +mve_pred16_t vcmpcsq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u16))) +mve_pred16_t vcmpcsq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32))) +mve_pred16_t vcmpcsq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u32))) +mve_pred16_t 
vcmpcsq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8))) +mve_pred16_t vcmpcsq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpcsq_u8))) +mve_pred16_t vcmpcsq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16))) +mve_pred16_t vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s16))) +mve_pred16_t vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32))) +mve_pred16_t vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s32))) +mve_pred16_t vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8))) +mve_pred16_t vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_s8))) +mve_pred16_t vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16))) +mve_pred16_t vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u16))) +mve_pred16_t vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32))) +mve_pred16_t vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u32))) +mve_pred16_t vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8))) +mve_pred16_t vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_u8))) +mve_pred16_t vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16))) +mve_pred16_t vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s16))) +mve_pred16_t vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32))) +mve_pred16_t vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s32))) +mve_pred16_t vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8))) +mve_pred16_t vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_s8))) +mve_pred16_t vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16))) +mve_pred16_t vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u16))) +mve_pred16_t vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32))) +mve_pred16_t vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u32))) +mve_pred16_t vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8))) +mve_pred16_t vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_u8))) +mve_pred16_t vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16))) +mve_pred16_t vcmpeqq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s16))) +mve_pred16_t vcmpeqq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32))) +mve_pred16_t vcmpeqq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s32))) +mve_pred16_t vcmpeqq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8))) +mve_pred16_t vcmpeqq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_s8))) +mve_pred16_t vcmpeqq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16))) +mve_pred16_t vcmpeqq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u16))) +mve_pred16_t vcmpeqq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32))) +mve_pred16_t vcmpeqq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u32))) +mve_pred16_t vcmpeqq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8))) +mve_pred16_t vcmpeqq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_u8))) +mve_pred16_t vcmpeqq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16))) +mve_pred16_t vcmpeqq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s16))) +mve_pred16_t vcmpeqq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32))) +mve_pred16_t vcmpeqq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s32))) +mve_pred16_t vcmpeqq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8))) +mve_pred16_t vcmpeqq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_s8))) +mve_pred16_t vcmpeqq(int8x16_t, int8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16))) +mve_pred16_t vcmpeqq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u16))) +mve_pred16_t vcmpeqq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32))) +mve_pred16_t vcmpeqq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u32))) +mve_pred16_t vcmpeqq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8))) +mve_pred16_t vcmpeqq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_u8))) +mve_pred16_t vcmpeqq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16))) +mve_pred16_t vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s16))) +mve_pred16_t vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32))) +mve_pred16_t vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s32))) +mve_pred16_t vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8))) +mve_pred16_t vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_s8))) +mve_pred16_t vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16))) +mve_pred16_t vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s16))) +mve_pred16_t vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32))) +mve_pred16_t vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s32))) +mve_pred16_t vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8))) +mve_pred16_t vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_s8))) +mve_pred16_t vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16))) +mve_pred16_t vcmpgeq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s16))) +mve_pred16_t vcmpgeq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32))) +mve_pred16_t vcmpgeq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s32))) +mve_pred16_t vcmpgeq(int32x4_t, int32_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8))) +mve_pred16_t vcmpgeq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_s8))) +mve_pred16_t vcmpgeq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16))) +mve_pred16_t vcmpgeq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s16))) +mve_pred16_t vcmpgeq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32))) +mve_pred16_t vcmpgeq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s32))) +mve_pred16_t vcmpgeq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8))) +mve_pred16_t vcmpgeq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_s8))) +mve_pred16_t vcmpgeq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16))) +mve_pred16_t vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s16))) +mve_pred16_t vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32))) +mve_pred16_t vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s32))) +mve_pred16_t vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8))) +mve_pred16_t vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_s8))) +mve_pred16_t vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16))) +mve_pred16_t vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s16))) +mve_pred16_t vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32))) +mve_pred16_t vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s32))) +mve_pred16_t vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8))) +mve_pred16_t vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_s8))) +mve_pred16_t vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16))) +mve_pred16_t vcmpgtq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s16))) +mve_pred16_t vcmpgtq(int16x8_t, int16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32))) +mve_pred16_t vcmpgtq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s32))) +mve_pred16_t vcmpgtq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8))) +mve_pred16_t vcmpgtq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_s8))) +mve_pred16_t vcmpgtq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16))) +mve_pred16_t vcmpgtq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s16))) +mve_pred16_t vcmpgtq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32))) +mve_pred16_t vcmpgtq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s32))) +mve_pred16_t vcmpgtq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8))) +mve_pred16_t vcmpgtq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_s8))) +mve_pred16_t vcmpgtq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16))) +mve_pred16_t vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u16))) +mve_pred16_t vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32))) +mve_pred16_t vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u32))) +mve_pred16_t vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8))) +mve_pred16_t vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_n_u8))) +mve_pred16_t vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16))) +mve_pred16_t vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u16))) +mve_pred16_t vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32))) +mve_pred16_t vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u32))) +mve_pred16_t vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8))) +mve_pred16_t vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_m_u8))) +mve_pred16_t vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16))) +mve_pred16_t vcmphiq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u16))) +mve_pred16_t vcmphiq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32))) +mve_pred16_t vcmphiq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u32))) +mve_pred16_t vcmphiq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8))) +mve_pred16_t vcmphiq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_n_u8))) +mve_pred16_t vcmphiq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16))) +mve_pred16_t vcmphiq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u16))) +mve_pred16_t vcmphiq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32))) +mve_pred16_t vcmphiq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u32))) +mve_pred16_t vcmphiq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8))) +mve_pred16_t vcmphiq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmphiq_u8))) +mve_pred16_t vcmphiq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16))) +mve_pred16_t vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s16))) +mve_pred16_t vcmpleq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32))) +mve_pred16_t vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s32))) +mve_pred16_t vcmpleq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8))) +mve_pred16_t vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_s8))) +mve_pred16_t vcmpleq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16))) +mve_pred16_t vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s16))) +mve_pred16_t vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32))) +mve_pred16_t vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s32))) +mve_pred16_t vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8))) +mve_pred16_t vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_s8))) +mve_pred16_t vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16))) +mve_pred16_t vcmpleq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s16))) +mve_pred16_t vcmpleq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32))) +mve_pred16_t vcmpleq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s32))) +mve_pred16_t vcmpleq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8))) +mve_pred16_t vcmpleq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_s8))) +mve_pred16_t vcmpleq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16))) +mve_pred16_t vcmpleq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s16))) +mve_pred16_t vcmpleq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32))) +mve_pred16_t vcmpleq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s32))) +mve_pred16_t vcmpleq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8))) +mve_pred16_t vcmpleq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_s8))) +mve_pred16_t vcmpleq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16))) +mve_pred16_t vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s16))) +mve_pred16_t vcmpltq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32))) +mve_pred16_t vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s32))) +mve_pred16_t vcmpltq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8))) +mve_pred16_t vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_s8))) +mve_pred16_t vcmpltq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16))) +mve_pred16_t vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s16))) +mve_pred16_t vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32))) +mve_pred16_t 
vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s32))) +mve_pred16_t vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8))) +mve_pred16_t vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_s8))) +mve_pred16_t vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16))) +mve_pred16_t vcmpltq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s16))) +mve_pred16_t vcmpltq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32))) +mve_pred16_t vcmpltq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s32))) +mve_pred16_t vcmpltq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8))) +mve_pred16_t vcmpltq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_s8))) +mve_pred16_t vcmpltq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16))) +mve_pred16_t vcmpltq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s16))) +mve_pred16_t vcmpltq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32))) +mve_pred16_t vcmpltq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s32))) +mve_pred16_t vcmpltq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8))) +mve_pred16_t vcmpltq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_s8))) +mve_pred16_t vcmpltq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16))) +mve_pred16_t vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s16))) +mve_pred16_t vcmpneq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32))) +mve_pred16_t vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s32))) +mve_pred16_t vcmpneq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8))) +mve_pred16_t vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_s8))) +mve_pred16_t vcmpneq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16))) +mve_pred16_t vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u16))) +mve_pred16_t vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32))) +mve_pred16_t vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u32))) +mve_pred16_t vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8))) +mve_pred16_t vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_u8))) +mve_pred16_t vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16))) +mve_pred16_t vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s16))) +mve_pred16_t vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32))) +mve_pred16_t vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s32))) +mve_pred16_t vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8))) +mve_pred16_t vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_s8))) +mve_pred16_t vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16))) +mve_pred16_t vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u16))) +mve_pred16_t vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32))) +mve_pred16_t vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u32))) +mve_pred16_t vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8))) +mve_pred16_t vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_u8))) +mve_pred16_t vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16))) +mve_pred16_t vcmpneq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s16))) +mve_pred16_t vcmpneq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32))) +mve_pred16_t vcmpneq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s32))) +mve_pred16_t vcmpneq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8))) +mve_pred16_t 
vcmpneq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_s8))) +mve_pred16_t vcmpneq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16))) +mve_pred16_t vcmpneq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u16))) +mve_pred16_t vcmpneq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32))) +mve_pred16_t vcmpneq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u32))) +mve_pred16_t vcmpneq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8))) +mve_pred16_t vcmpneq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_u8))) +mve_pred16_t vcmpneq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16))) +mve_pred16_t vcmpneq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s16))) +mve_pred16_t vcmpneq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32))) +mve_pred16_t vcmpneq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s32))) +mve_pred16_t vcmpneq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8))) +mve_pred16_t vcmpneq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_s8))) +mve_pred16_t vcmpneq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16))) +mve_pred16_t vcmpneq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u16))) +mve_pred16_t vcmpneq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32))) +mve_pred16_t vcmpneq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u32))) +mve_pred16_t vcmpneq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8))) +mve_pred16_t vcmpneq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_u8))) +mve_pred16_t vcmpneq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s16))) +int16x8_t vcreateq_s16(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s32))) +int32x4_t vcreateq_s32(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s64))) +int64x2_t vcreateq_s64(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_s8))) +int8x16_t vcreateq_s8(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u16))) +uint16x8_t 
vcreateq_u16(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u32))) +uint32x4_t vcreateq_u32(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u64))) +uint64x2_t vcreateq_u64(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_u8))) +uint8x16_t vcreateq_u8(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q))) +mve_pred16_t vctp16q(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp16q_m))) +mve_pred16_t vctp16q_m(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q))) +mve_pred16_t vctp32q(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp32q_m))) +mve_pred16_t vctp32q_m(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q))) +mve_pred16_t vctp64q(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp64q_m))) +mve_pred16_t vctp64q_m(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q))) +mve_pred16_t vctp8q(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vctp8q_m))) +mve_pred16_t vctp8q_m(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16))) +uint16x8_t vddupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u16))) +uint16x8_t vddupq_m(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32))) +uint32x4_t vddupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u32))) +uint32x4_t vddupq_m(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8))) +uint8x16_t vddupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_n_u8))) +uint8x16_t vddupq_m(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16))) +uint16x8_t vddupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u16))) +uint16x8_t vddupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32))) +uint32x4_t vddupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u32))) +uint32x4_t vddupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8))) +uint8x16_t vddupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_m_wb_u8))) +uint8x16_t vddupq_m(uint8x16_t, uint32_t *, int, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16))) +uint16x8_t vddupq_n_u16(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u16))) +uint16x8_t vddupq_u16(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32))) +uint32x4_t vddupq_n_u32(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u32))) +uint32x4_t vddupq_u32(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8))) +uint8x16_t vddupq_n_u8(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_n_u8))) +uint8x16_t vddupq_u8(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16))) +uint16x8_t vddupq_wb_u16(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u16))) +uint16x8_t vddupq_u16(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32))) +uint32x4_t vddupq_wb_u32(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u32))) +uint32x4_t vddupq_u32(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8))) +uint8x16_t vddupq_wb_u8(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_wb_u8))) +uint8x16_t vddupq_u8(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16))) +uint16x8_t vddupq_x_n_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u16))) +uint16x8_t vddupq_x_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32))) +uint32x4_t vddupq_x_n_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u32))) +uint32x4_t vddupq_x_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8))) +uint8x16_t vddupq_x_n_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_n_u8))) +uint8x16_t vddupq_x_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16))) +uint16x8_t vddupq_x_wb_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u16))) +uint16x8_t vddupq_x_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32))) +uint32x4_t vddupq_x_wb_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u32))) +uint32x4_t vddupq_x_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8))) +uint8x16_t vddupq_x_wb_u8(uint32_t *, int, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vddupq_x_wb_u8))) +uint8x16_t vddupq_x_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16))) +int16x8_t vdupq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s16))) +int16x8_t vdupq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32))) +int32x4_t vdupq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s32))) +int32x4_t vdupq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8))) +int8x16_t vdupq_m_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_s8))) +int8x16_t vdupq_m(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16))) +uint16x8_t vdupq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u16))) +uint16x8_t vdupq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32))) +uint32x4_t vdupq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u32))) +uint32x4_t vdupq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8))) +uint8x16_t vdupq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_u8))) +uint8x16_t vdupq_m(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s16))) +int16x8_t vdupq_n_s16(int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s32))) +int32x4_t vdupq_n_s32(int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_s8))) +int8x16_t vdupq_n_s8(int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u16))) +uint16x8_t vdupq_n_u16(uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u32))) +uint32x4_t vdupq_n_u32(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_u8))) +uint8x16_t vdupq_n_u8(uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s16))) +int16x8_t vdupq_x_n_s16(int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s32))) +int32x4_t vdupq_x_n_s32(int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_s8))) +int8x16_t vdupq_x_n_s8(int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u16))) +uint16x8_t vdupq_x_n_u16(uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u32))) +uint32x4_t vdupq_x_n_u32(uint32_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_u8))) +uint8x16_t vdupq_x_n_u8(uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16))) +uint16x8_t vdwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u16))) +uint16x8_t vdwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32))) +uint32x4_t vdwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u32))) +uint32x4_t vdwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8))) +uint8x16_t vdwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_n_u8))) +uint8x16_t vdwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16))) +uint16x8_t vdwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u16))) +uint16x8_t vdwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32))) +uint32x4_t vdwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u32))) +uint32x4_t vdwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8))) +uint8x16_t vdwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_m_wb_u8))) +uint8x16_t vdwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16))) +uint16x8_t vdwdupq_n_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u16))) +uint16x8_t vdwdupq_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32))) +uint32x4_t vdwdupq_n_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u32))) +uint32x4_t vdwdupq_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8))) +uint8x16_t vdwdupq_n_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_n_u8))) +uint8x16_t vdwdupq_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16))) +uint16x8_t vdwdupq_wb_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u16))) +uint16x8_t vdwdupq_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32))) +uint32x4_t vdwdupq_wb_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u32))) +uint32x4_t vdwdupq_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8))) +uint8x16_t vdwdupq_wb_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_wb_u8))) +uint8x16_t vdwdupq_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16))) +uint16x8_t vdwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u16))) +uint16x8_t vdwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32))) +uint32x4_t vdwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u32))) +uint32x4_t vdwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8))) +uint8x16_t vdwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_n_u8))) +uint8x16_t vdwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16))) +uint16x8_t vdwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u16))) +uint16x8_t vdwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32))) +uint32x4_t vdwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u32))) +uint32x4_t vdwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8))) +uint8x16_t vdwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdwdupq_x_wb_u8))) +uint8x16_t vdwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16))) +int16x8_t veorq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s16))) +int16x8_t veorq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32))) +int32x4_t veorq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s32))) +int32x4_t veorq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8))) +int8x16_t veorq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_s8))) +int8x16_t veorq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16))) +uint16x8_t veorq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u16))) +uint16x8_t veorq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32))) +uint32x4_t veorq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u32))) +uint32x4_t veorq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8))) +uint8x16_t veorq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_u8))) +uint8x16_t veorq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16))) +int16x8_t veorq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s16))) +int16x8_t veorq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32))) +int32x4_t veorq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s32))) +int32x4_t veorq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8))) +int8x16_t veorq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_s8))) +int8x16_t veorq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16))) +uint16x8_t veorq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u16))) +uint16x8_t veorq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32))) +uint32x4_t veorq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u32))) +uint32x4_t veorq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8))) +uint8x16_t veorq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_u8))) +uint8x16_t veorq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16))) +int16x8_t veorq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s16))) +int16x8_t veorq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32))) +int32x4_t veorq_x_s32(int32x4_t, int32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s32))) +int32x4_t veorq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8))) +int8x16_t veorq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_s8))) +int8x16_t veorq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16))) +uint16x8_t veorq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u16))) +uint16x8_t veorq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32))) +uint32x4_t veorq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u32))) +uint32x4_t veorq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8))) +uint8x16_t veorq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_u8))) +uint8x16_t veorq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16))) +int16_t vgetq_lane_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s16))) +int16_t vgetq_lane(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32))) +int32_t vgetq_lane_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s32))) +int32_t vgetq_lane(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64))) +int64_t vgetq_lane_s64(int64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s64))) +int64_t vgetq_lane(int64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8))) +int8_t vgetq_lane_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_s8))) +int8_t vgetq_lane(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16))) +uint16_t vgetq_lane_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u16))) +uint16_t vgetq_lane(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32))) +uint32_t vgetq_lane_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u32))) +uint32_t vgetq_lane(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64))) +uint64_t vgetq_lane_u64(uint64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u64))) +uint64_t vgetq_lane(uint64x2_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8))) +uint8_t vgetq_lane_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_u8))) +uint8_t vgetq_lane(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16))) +int16x8_t vhaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s16))) +int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32))) +int32x4_t vhaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s32))) +int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8))) +int8x16_t vhaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_s8))) +int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16))) +uint16x8_t vhaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u16))) +uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32))) +uint32x4_t vhaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u32))) +uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8))) +uint8x16_t vhaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_n_u8))) +uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16))) +int16x8_t vhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s16))) +int16x8_t vhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32))) +int32x4_t vhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s32))) +int32x4_t vhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8))) +int8x16_t vhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_s8))) +int8x16_t vhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16))) +uint16x8_t vhaddq_m_u16(uint16x8_t, uint16x8_t, 
uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u16))) +uint16x8_t vhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32))) +uint32x4_t vhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u32))) +uint32x4_t vhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8))) +uint8x16_t vhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_m_u8))) +uint8x16_t vhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16))) +int16x8_t vhaddq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s16))) +int16x8_t vhaddq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32))) +int32x4_t vhaddq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s32))) +int32x4_t vhaddq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8))) +int8x16_t vhaddq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_s8))) +int8x16_t vhaddq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16))) +uint16x8_t vhaddq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u16))) +uint16x8_t vhaddq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32))) +uint32x4_t vhaddq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u32))) +uint32x4_t vhaddq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8))) +uint8x16_t vhaddq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_n_u8))) +uint8x16_t vhaddq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16))) +int16x8_t vhaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s16))) +int16x8_t vhaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32))) +int32x4_t vhaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s32))) +int32x4_t vhaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8))) +int8x16_t vhaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_s8))) +int8x16_t vhaddq(int8x16_t, int8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16))) +uint16x8_t vhaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u16))) +uint16x8_t vhaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32))) +uint32x4_t vhaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u32))) +uint32x4_t vhaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8))) +uint8x16_t vhaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_u8))) +uint8x16_t vhaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16))) +int16x8_t vhaddq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s16))) +int16x8_t vhaddq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32))) +int32x4_t vhaddq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s32))) +int32x4_t vhaddq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8))) +int8x16_t vhaddq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_s8))) +int8x16_t vhaddq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16))) +uint16x8_t vhaddq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u16))) +uint16x8_t vhaddq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32))) +uint32x4_t vhaddq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u32))) +uint32x4_t vhaddq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8))) +uint8x16_t vhaddq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_n_u8))) +uint8x16_t vhaddq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16))) +int16x8_t vhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s16))) +int16x8_t vhaddq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32))) +int32x4_t vhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s32))) +int32x4_t vhaddq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8))) +int8x16_t vhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_s8))) +int8x16_t vhaddq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16))) +uint16x8_t vhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u16))) +uint16x8_t vhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32))) +uint32x4_t vhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u32))) +uint32x4_t vhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8))) +uint8x16_t vhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhaddq_x_u8))) +uint8x16_t vhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16))) +int16x8_t vhcaddq_rot270_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s16))) +int16x8_t vhcaddq_rot270_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32))) +int32x4_t vhcaddq_rot270_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s32))) +int32x4_t vhcaddq_rot270_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8))) +int8x16_t vhcaddq_rot270_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_m_s8))) +int8x16_t vhcaddq_rot270_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16))) +int16x8_t vhcaddq_rot270_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s16))) +int16x8_t vhcaddq_rot270(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32))) +int32x4_t vhcaddq_rot270_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s32))) +int32x4_t vhcaddq_rot270(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8))) +int8x16_t vhcaddq_rot270_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_s8))) +int8x16_t vhcaddq_rot270(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16))) +int16x8_t vhcaddq_rot270_x_s16(int16x8_t, int16x8_t, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s16))) +int16x8_t vhcaddq_rot270_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32))) +int32x4_t vhcaddq_rot270_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s32))) +int32x4_t vhcaddq_rot270_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8))) +int8x16_t vhcaddq_rot270_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot270_x_s8))) +int8x16_t vhcaddq_rot270_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16))) +int16x8_t vhcaddq_rot90_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s16))) +int16x8_t vhcaddq_rot90_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32))) +int32x4_t vhcaddq_rot90_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s32))) +int32x4_t vhcaddq_rot90_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8))) +int8x16_t vhcaddq_rot90_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_m_s8))) +int8x16_t vhcaddq_rot90_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16))) +int16x8_t vhcaddq_rot90_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s16))) +int16x8_t vhcaddq_rot90(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32))) +int32x4_t vhcaddq_rot90_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s32))) +int32x4_t vhcaddq_rot90(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8))) +int8x16_t vhcaddq_rot90_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_s8))) +int8x16_t vhcaddq_rot90(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16))) +int16x8_t vhcaddq_rot90_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s16))) +int16x8_t vhcaddq_rot90_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32))) +int32x4_t vhcaddq_rot90_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s32))) +int32x4_t vhcaddq_rot90_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8))) +int8x16_t vhcaddq_rot90_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhcaddq_rot90_x_s8))) +int8x16_t vhcaddq_rot90_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16))) +int16x8_t vhsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s16))) +int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32))) +int32x4_t vhsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s32))) +int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8))) +int8x16_t vhsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_s8))) +int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16))) +uint16x8_t vhsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u16))) +uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32))) +uint32x4_t vhsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u32))) +uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8))) +uint8x16_t vhsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_n_u8))) +uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16))) +int16x8_t vhsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s16))) +int16x8_t vhsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32))) +int32x4_t vhsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s32))) +int32x4_t vhsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8))) +int8x16_t vhsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_s8))) 
+int8x16_t vhsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16))) +uint16x8_t vhsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u16))) +uint16x8_t vhsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32))) +uint32x4_t vhsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u32))) +uint32x4_t vhsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8))) +uint8x16_t vhsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_m_u8))) +uint8x16_t vhsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16))) +int16x8_t vhsubq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s16))) +int16x8_t vhsubq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32))) +int32x4_t vhsubq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s32))) +int32x4_t vhsubq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8))) +int8x16_t vhsubq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_s8))) +int8x16_t vhsubq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16))) +uint16x8_t vhsubq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u16))) +uint16x8_t vhsubq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32))) +uint32x4_t vhsubq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u32))) +uint32x4_t vhsubq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8))) +uint8x16_t vhsubq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_n_u8))) +uint8x16_t vhsubq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16))) +int16x8_t vhsubq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s16))) +int16x8_t vhsubq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32))) +int32x4_t vhsubq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s32))) +int32x4_t vhsubq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8))) +int8x16_t 
vhsubq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_s8))) +int8x16_t vhsubq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16))) +uint16x8_t vhsubq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u16))) +uint16x8_t vhsubq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32))) +uint32x4_t vhsubq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u32))) +uint32x4_t vhsubq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8))) +uint8x16_t vhsubq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_u8))) +uint8x16_t vhsubq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16))) +int16x8_t vhsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s16))) +int16x8_t vhsubq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32))) +int32x4_t vhsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s32))) +int32x4_t vhsubq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8))) +int8x16_t vhsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_s8))) +int8x16_t vhsubq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16))) +uint16x8_t vhsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u16))) +uint16x8_t vhsubq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32))) +uint32x4_t vhsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u32))) +uint32x4_t vhsubq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8))) +uint8x16_t vhsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_n_u8))) +uint8x16_t vhsubq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16))) +int16x8_t vhsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s16))) +int16x8_t vhsubq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32))) +int32x4_t vhsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s32))) +int32x4_t vhsubq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8))) +int8x16_t vhsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_s8))) +int8x16_t vhsubq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16))) +uint16x8_t vhsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u16))) +uint16x8_t vhsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32))) +uint32x4_t vhsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u32))) +uint32x4_t vhsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8))) +uint8x16_t vhsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vhsubq_x_u8))) +uint8x16_t vhsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16))) +uint16x8_t vidupq_m_n_u16(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u16))) +uint16x8_t vidupq_m(uint16x8_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32))) +uint32x4_t vidupq_m_n_u32(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u32))) +uint32x4_t vidupq_m(uint32x4_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8))) +uint8x16_t vidupq_m_n_u8(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_n_u8))) +uint8x16_t vidupq_m(uint8x16_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16))) +uint16x8_t vidupq_m_wb_u16(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u16))) +uint16x8_t vidupq_m(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32))) +uint32x4_t vidupq_m_wb_u32(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u32))) +uint32x4_t vidupq_m(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8))) +uint8x16_t vidupq_m_wb_u8(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_m_wb_u8))) +uint8x16_t vidupq_m(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16))) +uint16x8_t vidupq_n_u16(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u16))) +uint16x8_t vidupq_u16(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32))) +uint32x4_t vidupq_n_u32(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u32))) +uint32x4_t vidupq_u32(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8))) +uint8x16_t vidupq_n_u8(uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_n_u8))) +uint8x16_t vidupq_u8(uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16))) +uint16x8_t vidupq_wb_u16(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u16))) +uint16x8_t vidupq_u16(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32))) +uint32x4_t vidupq_wb_u32(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u32))) +uint32x4_t vidupq_u32(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8))) +uint8x16_t vidupq_wb_u8(uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_wb_u8))) +uint8x16_t vidupq_u8(uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16))) +uint16x8_t vidupq_x_n_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u16))) +uint16x8_t vidupq_x_u16(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32))) +uint32x4_t vidupq_x_n_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u32))) +uint32x4_t vidupq_x_u32(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8))) +uint8x16_t vidupq_x_n_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_n_u8))) +uint8x16_t vidupq_x_u8(uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16))) +uint16x8_t vidupq_x_wb_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u16))) +uint16x8_t vidupq_x_u16(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32))) +uint32x4_t vidupq_x_wb_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u32))) +uint32x4_t vidupq_x_u32(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8))) +uint8x16_t vidupq_x_wb_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vidupq_x_wb_u8))) +uint8x16_t vidupq_x_u8(uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16))) +uint16x8_t viwdupq_m_n_u16(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u16))) +uint16x8_t viwdupq_m(uint16x8_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32))) +uint32x4_t viwdupq_m_n_u32(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u32))) +uint32x4_t viwdupq_m(uint32x4_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8))) +uint8x16_t viwdupq_m_n_u8(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_n_u8))) +uint8x16_t viwdupq_m(uint8x16_t, uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16))) +uint16x8_t viwdupq_m_wb_u16(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u16))) +uint16x8_t viwdupq_m(uint16x8_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32))) +uint32x4_t viwdupq_m_wb_u32(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u32))) +uint32x4_t viwdupq_m(uint32x4_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8))) +uint8x16_t viwdupq_m_wb_u8(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_m_wb_u8))) +uint8x16_t viwdupq_m(uint8x16_t, uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16))) +uint16x8_t viwdupq_n_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u16))) +uint16x8_t viwdupq_u16(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32))) +uint32x4_t viwdupq_n_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u32))) +uint32x4_t viwdupq_u32(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8))) +uint8x16_t viwdupq_n_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_n_u8))) +uint8x16_t viwdupq_u8(uint32_t, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16))) +uint16x8_t viwdupq_wb_u16(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u16))) +uint16x8_t viwdupq_u16(uint32_t 
*, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32))) +uint32x4_t viwdupq_wb_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u32))) +uint32x4_t viwdupq_u32(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8))) +uint8x16_t viwdupq_wb_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_wb_u8))) +uint8x16_t viwdupq_u8(uint32_t *, uint32_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16))) +uint16x8_t viwdupq_x_n_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u16))) +uint16x8_t viwdupq_x_u16(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32))) +uint32x4_t viwdupq_x_n_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u32))) +uint32x4_t viwdupq_x_u32(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8))) +uint8x16_t viwdupq_x_n_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_n_u8))) +uint8x16_t viwdupq_x_u8(uint32_t, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16))) +uint16x8_t viwdupq_x_wb_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u16))) +uint16x8_t viwdupq_x_u16(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32))) +uint32x4_t viwdupq_x_wb_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u32))) +uint32x4_t viwdupq_x_u32(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8))) +uint8x16_t viwdupq_x_wb_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_viwdupq_x_wb_u8))) +uint8x16_t viwdupq_x_u8(uint32_t *, uint32_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16))) +int16x8_t vld1q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s16))) +int16x8_t vld1q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32))) +int32x4_t vld1q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s32))) +int32x4_t vld1q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8))) +int8x16_t vld1q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_s8))) +int8x16_t vld1q(const int8_t *); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16))) +uint16x8_t vld1q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u16))) +uint16x8_t vld1q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32))) +uint32x4_t vld1q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u32))) +uint32x4_t vld1q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8))) +uint8x16_t vld1q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_u8))) +uint8x16_t vld1q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16))) +int16x8_t vld1q_z_s16(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s16))) +int16x8_t vld1q_z(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32))) +int32x4_t vld1q_z_s32(const int32_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s32))) +int32x4_t vld1q_z(const int32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8))) +int8x16_t vld1q_z_s8(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_s8))) +int8x16_t vld1q_z(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16))) +uint16x8_t vld1q_z_u16(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u16))) +uint16x8_t vld1q_z(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32))) +uint32x4_t vld1q_z_u32(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u32))) +uint32x4_t vld1q_z(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8))) +uint8x16_t vld1q_z_u8(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_u8))) +uint8x16_t vld1q_z(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16))) +int16x8x2_t vld2q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s16))) +int16x8x2_t vld2q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32))) +int32x4x2_t vld2q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s32))) +int32x4x2_t vld2q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8))) +int8x16x2_t vld2q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_s8))) +int8x16x2_t vld2q(const int8_t *); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16))) +uint16x8x2_t vld2q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u16))) +uint16x8x2_t vld2q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32))) +uint32x4x2_t vld2q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u32))) +uint32x4x2_t vld2q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8))) +uint8x16x2_t vld2q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_u8))) +uint8x16x2_t vld2q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16))) +int16x8x4_t vld4q_s16(const int16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s16))) +int16x8x4_t vld4q(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32))) +int32x4x4_t vld4q_s32(const int32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s32))) +int32x4x4_t vld4q(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8))) +int8x16x4_t vld4q_s8(const int8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_s8))) +int8x16x4_t vld4q(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16))) +uint16x8x4_t vld4q_u16(const uint16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u16))) +uint16x8x4_t vld4q(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32))) +uint32x4x4_t vld4q_u32(const uint32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u32))) +uint32x4x4_t vld4q(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8))) +uint8x16x4_t vld4q_u8(const uint8_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_u8))) +uint8x16x4_t vld4q(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16))) +int16x8_t vldrbq_gather_offset_s16(const int8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s16))) +int16x8_t vldrbq_gather_offset(const int8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32))) +int32x4_t vldrbq_gather_offset_s32(const int8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s32))) +int32x4_t vldrbq_gather_offset(const int8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8))) +int8x16_t vldrbq_gather_offset_s8(const int8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_s8))) +int8x16_t vldrbq_gather_offset(const int8_t *, 
uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16))) +uint16x8_t vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u16))) +uint16x8_t vldrbq_gather_offset(const uint8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32))) +uint32x4_t vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u32))) +uint32x4_t vldrbq_gather_offset(const uint8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8))) +uint8x16_t vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_u8))) +uint8x16_t vldrbq_gather_offset(const uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16))) +int16x8_t vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16))) +int16x8_t vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32))) +int32x4_t vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32))) +int32x4_t vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8))) +int8x16_t vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8))) +int8x16_t vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16))) +uint16x8_t vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16))) +uint16x8_t vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32))) +uint32x4_t vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32))) +uint32x4_t vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8))) +uint8x16_t vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8))) +uint8x16_t vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s16))) +int16x8_t 
vldrbq_s16(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s32))) +int32x4_t vldrbq_s32(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_s8))) +int8x16_t vldrbq_s8(const int8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u16))) +uint16x8_t vldrbq_u16(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u32))) +uint32x4_t vldrbq_u32(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_u8))) +uint8x16_t vldrbq_u8(const uint8_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s16))) +int16x8_t vldrbq_z_s16(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s32))) +int32x4_t vldrbq_z_s32(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_s8))) +int8x16_t vldrbq_z_s8(const int8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u16))) +uint16x8_t vldrbq_z_u16(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u32))) +uint32x4_t vldrbq_z_u32(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrbq_z_u8))) +uint8x16_t vldrbq_z_u8(const uint8_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_s64))) +int64x2_t vldrdq_gather_base_s64(uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_u64))) +uint64x2_t vldrdq_gather_base_u64(uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64))) +int64x2_t vldrdq_gather_base_wb_s64(uint64x2_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64))) +uint64x2_t vldrdq_gather_base_wb_u64(uint64x2_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64))) +int64x2_t vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64))) +uint64x2_t vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64))) +int64x2_t vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64))) +uint64x2_t vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64))) +int64x2_t vldrdq_gather_offset_s64(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_s64))) +int64x2_t vldrdq_gather_offset(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64))) +uint64x2_t vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_u64))) +uint64x2_t vldrdq_gather_offset(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64))) +int64x2_t vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64))) +int64x2_t vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64))) +uint64x2_t vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64))) +uint64x2_t vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64))) +int64x2_t vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64))) +int64x2_t vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64))) +uint64x2_t vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64))) +uint64x2_t vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64))) +int64x2_t vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64))) +int64x2_t vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64))) +uint64x2_t vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64))) +uint64x2_t vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16))) +int16x8_t vldrhq_gather_offset_s16(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s16))) +int16x8_t vldrhq_gather_offset(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32))) +int32x4_t vldrhq_gather_offset_s32(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_s32))) +int32x4_t vldrhq_gather_offset(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16))) +uint16x8_t vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u16))) +uint16x8_t vldrhq_gather_offset(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32))) +uint32x4_t vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_u32))) +uint32x4_t vldrhq_gather_offset(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16))) +int16x8_t vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16))) +int16x8_t vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32))) +int32x4_t vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32))) +int32x4_t vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16))) +uint16x8_t vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16))) +uint16x8_t vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32))) +uint32x4_t vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32))) +uint32x4_t vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16))) +int16x8_t vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16))) +int16x8_t vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32))) +int32x4_t vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32))) +int32x4_t vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16))) +uint16x8_t vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16))) +uint16x8_t vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32))) +uint32x4_t vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32))) +uint32x4_t vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16))) +int16x8_t vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16))) +int16x8_t vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32))) +int32x4_t vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32))) +int32x4_t vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16))) +uint16x8_t vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16))) +uint16x8_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32))) +uint32x4_t vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32))) +uint32x4_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s16))) +int16x8_t vldrhq_s16(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_s32))) +int32x4_t vldrhq_s32(const int16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u16))) +uint16x8_t vldrhq_u16(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_u32))) +uint32x4_t vldrhq_u32(const uint16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s16))) +int16x8_t vldrhq_z_s16(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_s32))) +int32x4_t vldrhq_z_s32(const int16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u16))) +uint16x8_t vldrhq_z_u16(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_u32))) +uint32x4_t vldrhq_z_u32(const uint16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_s32))) +int32x4_t vldrwq_gather_base_s32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_u32))) +uint32x4_t vldrwq_gather_base_u32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32))) +int32x4_t vldrwq_gather_base_wb_s32(uint32x4_t *, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32))) +uint32x4_t vldrwq_gather_base_wb_u32(uint32x4_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32))) +int32x4_t vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32))) +uint32x4_t vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32))) +int32x4_t vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32))) +uint32x4_t vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32))) +int32x4_t vldrwq_gather_offset_s32(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_s32))) +int32x4_t vldrwq_gather_offset(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32))) +uint32x4_t vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_u32))) +uint32x4_t vldrwq_gather_offset(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32))) +int32x4_t vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32))) +int32x4_t vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32))) +uint32x4_t vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32))) +uint32x4_t vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32))) +int32x4_t vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32))) +int32x4_t vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32))) +uint32x4_t vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32))) +uint32x4_t vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32))) +int32x4_t vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32))) +int32x4_t vldrwq_gather_shifted_offset_z(const int32_t *, 
uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32))) +uint32x4_t vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32))) +uint32x4_t vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_s32))) +int32x4_t vldrwq_s32(const int32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_u32))) +uint32x4_t vldrwq_u32(const uint32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_s32))) +int32x4_t vldrwq_z_s32(const int32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_u32))) +uint32x4_t vldrwq_z_u32(const uint32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16))) +uint16x8_t vmaxaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s16))) +uint16x8_t vmaxaq_m(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32))) +uint32x4_t vmaxaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s32))) +uint32x4_t vmaxaq_m(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8))) +uint8x16_t vmaxaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_m_s8))) +uint8x16_t vmaxaq_m(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16))) +uint16x8_t vmaxaq_s16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s16))) +uint16x8_t vmaxaq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32))) +uint32x4_t vmaxaq_s32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s32))) +uint32x4_t vmaxaq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8))) +uint8x16_t vmaxaq_s8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxaq_s8))) +uint8x16_t vmaxaq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16))) +uint16_t vmaxavq_p_s16(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s16))) +uint16_t vmaxavq_p(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32))) +uint32_t vmaxavq_p_s32(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s32))) +uint32_t vmaxavq_p(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8))) +uint8_t vmaxavq_p_s8(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_p_s8))) +uint8_t vmaxavq_p(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16))) +uint16_t vmaxavq_s16(uint16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s16))) +uint16_t vmaxavq(uint16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32))) +uint32_t vmaxavq_s32(uint32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s32))) +uint32_t vmaxavq(uint32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8))) +uint8_t vmaxavq_s8(uint8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxavq_s8))) +uint8_t vmaxavq(uint8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16))) +int16x8_t vmaxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s16))) +int16x8_t vmaxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32))) +int32x4_t vmaxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s32))) +int32x4_t vmaxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8))) +int8x16_t vmaxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_s8))) +int8x16_t vmaxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16))) +uint16x8_t vmaxq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u16))) +uint16x8_t vmaxq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32))) +uint32x4_t vmaxq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u32))) +uint32x4_t vmaxq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8))) +uint8x16_t vmaxq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_m_u8))) +uint8x16_t vmaxq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16))) +int16x8_t vmaxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s16))) +int16x8_t vmaxq(int16x8_t, int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32))) +int32x4_t vmaxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s32))) +int32x4_t vmaxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8))) +int8x16_t vmaxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_s8))) +int8x16_t vmaxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16))) +uint16x8_t vmaxq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u16))) +uint16x8_t vmaxq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32))) +uint32x4_t vmaxq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u32))) +uint32x4_t vmaxq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8))) +uint8x16_t vmaxq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_u8))) +uint8x16_t vmaxq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16))) +int16x8_t vmaxq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s16))) +int16x8_t vmaxq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32))) +int32x4_t vmaxq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s32))) +int32x4_t vmaxq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8))) +int8x16_t vmaxq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_s8))) +int8x16_t vmaxq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16))) +uint16x8_t vmaxq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u16))) +uint16x8_t vmaxq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32))) +uint32x4_t vmaxq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u32))) +uint32x4_t vmaxq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8))) +uint8x16_t vmaxq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxq_x_u8))) +uint8x16_t vmaxq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16))) +int16_t vmaxvq_p_s16(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s16))) +int16_t vmaxvq_p(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32))) +int32_t vmaxvq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s32))) +int32_t vmaxvq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8))) +int8_t vmaxvq_p_s8(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_s8))) +int8_t vmaxvq_p(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16))) +uint16_t vmaxvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u16))) +uint16_t vmaxvq_p(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32))) +uint32_t vmaxvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u32))) +uint32_t vmaxvq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8))) +uint8_t vmaxvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_p_u8))) +uint8_t vmaxvq_p(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16))) +int16_t vmaxvq_s16(int16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s16))) +int16_t vmaxvq(int16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32))) +int32_t vmaxvq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s32))) +int32_t vmaxvq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8))) +int8_t vmaxvq_s8(int8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_s8))) +int8_t vmaxvq(int8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16))) +uint16_t vmaxvq_u16(uint16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u16))) +uint16_t vmaxvq(uint16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32))) +uint32_t vmaxvq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u32))) +uint32_t vmaxvq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8))) +uint8_t vmaxvq_u8(uint8_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxvq_u8))) +uint8_t vmaxvq(uint8_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16))) +uint16x8_t vminaq_m_s16(uint16x8_t, int16x8_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s16))) +uint16x8_t vminaq_m(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32))) +uint32x4_t vminaq_m_s32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s32))) +uint32x4_t vminaq_m(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8))) +uint8x16_t vminaq_m_s8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_m_s8))) +uint8x16_t vminaq_m(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16))) +uint16x8_t vminaq_s16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s16))) +uint16x8_t vminaq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32))) +uint32x4_t vminaq_s32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s32))) +uint32x4_t vminaq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8))) +uint8x16_t vminaq_s8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminaq_s8))) +uint8x16_t vminaq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16))) +uint16_t vminavq_p_s16(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s16))) +uint16_t vminavq_p(uint16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32))) +uint32_t vminavq_p_s32(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s32))) +uint32_t vminavq_p(uint32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8))) +uint8_t vminavq_p_s8(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_p_s8))) +uint8_t vminavq_p(uint8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16))) +uint16_t vminavq_s16(uint16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s16))) +uint16_t vminavq(uint16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32))) +uint32_t vminavq_s32(uint32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s32))) +uint32_t vminavq(uint32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8))) +uint8_t vminavq_s8(uint8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminavq_s8))) +uint8_t vminavq(uint8_t, int8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16))) +int16x8_t vminq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s16))) +int16x8_t vminq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32))) +int32x4_t vminq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s32))) +int32x4_t vminq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8))) +int8x16_t vminq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_s8))) +int8x16_t vminq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16))) +uint16x8_t vminq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u16))) +uint16x8_t vminq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32))) +uint32x4_t vminq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u32))) +uint32x4_t vminq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8))) +uint8x16_t vminq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_m_u8))) +uint8x16_t vminq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16))) +int16x8_t vminq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s16))) +int16x8_t vminq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32))) +int32x4_t vminq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s32))) +int32x4_t vminq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8))) +int8x16_t vminq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_s8))) +int8x16_t vminq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16))) +uint16x8_t vminq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u16))) +uint16x8_t vminq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32))) +uint32x4_t vminq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u32))) +uint32x4_t vminq(uint32x4_t, uint32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8))) +uint8x16_t vminq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_u8))) +uint8x16_t vminq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16))) +int16x8_t vminq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s16))) +int16x8_t vminq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32))) +int32x4_t vminq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s32))) +int32x4_t vminq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8))) +int8x16_t vminq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_s8))) +int8x16_t vminq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16))) +uint16x8_t vminq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u16))) +uint16x8_t vminq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32))) +uint32x4_t vminq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u32))) +uint32x4_t vminq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8))) +uint8x16_t vminq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminq_x_u8))) +uint8x16_t vminq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16))) +int16_t vminvq_p_s16(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s16))) +int16_t vminvq_p(int16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32))) +int32_t vminvq_p_s32(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s32))) +int32_t vminvq_p(int32_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8))) +int8_t vminvq_p_s8(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_s8))) +int8_t vminvq_p(int8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16))) +uint16_t vminvq_p_u16(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u16))) +uint16_t vminvq_p(uint16_t, uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32))) +uint32_t vminvq_p_u32(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u32))) +uint32_t vminvq_p(uint32_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8))) +uint8_t vminvq_p_u8(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_p_u8))) +uint8_t vminvq_p(uint8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16))) +int16_t vminvq_s16(int16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s16))) +int16_t vminvq(int16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32))) +int32_t vminvq_s32(int32_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s32))) +int32_t vminvq(int32_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8))) +int8_t vminvq_s8(int8_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_s8))) +int8_t vminvq(int8_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16))) +uint16_t vminvq_u16(uint16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u16))) +uint16_t vminvq(uint16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32))) +uint32_t vminvq_u32(uint32_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u32))) +uint32_t vminvq(uint32_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8))) +uint8_t vminvq_u8(uint8_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminvq_u8))) +uint8_t vminvq(uint8_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16))) +int32_t vmladavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s16))) +int32_t vmladavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32))) +int32_t vmladavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s32))) +int32_t vmladavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8))) +int32_t vmladavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_s8))) +int32_t vmladavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16))) +uint32_t vmladavaq_p_u16(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u16))) +uint32_t vmladavaq_p(uint32_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32))) +uint32_t vmladavaq_p_u32(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u32))) +uint32_t vmladavaq_p(uint32_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8))) +uint32_t vmladavaq_p_u8(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_p_u8))) +uint32_t vmladavaq_p(uint32_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16))) +int32_t vmladavaq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s16))) +int32_t vmladavaq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32))) +int32_t vmladavaq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s32))) +int32_t vmladavaq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8))) +int32_t vmladavaq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_s8))) +int32_t vmladavaq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16))) +uint32_t vmladavaq_u16(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u16))) +uint32_t vmladavaq(uint32_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32))) +uint32_t vmladavaq_u32(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u32))) +uint32_t vmladavaq(uint32_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8))) +uint32_t vmladavaq_u8(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaq_u8))) +uint32_t vmladavaq(uint32_t, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16))) +int32_t vmladavaxq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s16))) +int32_t vmladavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32))) +int32_t vmladavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s32))) +int32_t vmladavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8))) +int32_t vmladavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_p_s8))) +int32_t vmladavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16))) +int32_t vmladavaxq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s16))) +int32_t vmladavaxq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32))) +int32_t vmladavaxq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s32))) +int32_t vmladavaxq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8))) +int32_t vmladavaxq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavaxq_s8))) +int32_t vmladavaxq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16))) +int32_t vmladavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s16))) +int32_t vmladavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32))) +int32_t vmladavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s32))) +int32_t vmladavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8))) +int32_t vmladavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_s8))) +int32_t vmladavq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16))) +uint32_t vmladavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u16))) +uint32_t vmladavq_p(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32))) +uint32_t vmladavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u32))) +uint32_t vmladavq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8))) +uint32_t vmladavq_p_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_p_u8))) +uint32_t vmladavq_p(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16))) +int32_t vmladavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s16))) +int32_t vmladavq(int16x8_t, int16x8_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32))) +int32_t vmladavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s32))) +int32_t vmladavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8))) +int32_t vmladavq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_s8))) +int32_t vmladavq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16))) +uint32_t vmladavq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u16))) +uint32_t vmladavq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32))) +uint32_t vmladavq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u32))) +uint32_t vmladavq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8))) +uint32_t vmladavq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavq_u8))) +uint32_t vmladavq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16))) +int32_t vmladavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s16))) +int32_t vmladavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32))) +int32_t vmladavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s32))) +int32_t vmladavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8))) +int32_t vmladavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_p_s8))) +int32_t vmladavxq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16))) +int32_t vmladavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s16))) +int32_t vmladavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32))) +int32_t vmladavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s32))) +int32_t vmladavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8))) +int32_t vmladavxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmladavxq_s8))) +int32_t vmladavxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16))) +int64_t vmlaldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s16))) +int64_t vmlaldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32))) +int64_t vmlaldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_s32))) +int64_t vmlaldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16))) +uint64_t vmlaldavaq_p_u16(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u16))) +uint64_t vmlaldavaq_p(uint64_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32))) +uint64_t vmlaldavaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_p_u32))) +uint64_t vmlaldavaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16))) +int64_t vmlaldavaq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s16))) +int64_t vmlaldavaq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32))) +int64_t vmlaldavaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_s32))) +int64_t vmlaldavaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16))) +uint64_t vmlaldavaq_u16(uint64_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u16))) +uint64_t vmlaldavaq(uint64_t, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32))) +uint64_t vmlaldavaq_u32(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaq_u32))) +uint64_t vmlaldavaq(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16))) +int64_t vmlaldavaxq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s16))) +int64_t vmlaldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32))) +int64_t vmlaldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_p_s32))) +int64_t vmlaldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16))) +int64_t vmlaldavaxq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s16))) +int64_t vmlaldavaxq(int64_t, 
int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32))) +int64_t vmlaldavaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavaxq_s32))) +int64_t vmlaldavaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16))) +int64_t vmlaldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s16))) +int64_t vmlaldavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32))) +int64_t vmlaldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_s32))) +int64_t vmlaldavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16))) +uint64_t vmlaldavq_p_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u16))) +uint64_t vmlaldavq_p(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32))) +uint64_t vmlaldavq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_p_u32))) +uint64_t vmlaldavq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16))) +int64_t vmlaldavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s16))) +int64_t vmlaldavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32))) +int64_t vmlaldavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_s32))) +int64_t vmlaldavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16))) +uint64_t vmlaldavq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u16))) +uint64_t vmlaldavq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32))) +uint64_t vmlaldavq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavq_u32))) +uint64_t vmlaldavq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16))) +int64_t vmlaldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s16))) +int64_t vmlaldavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32))) +int64_t vmlaldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_p_s32))) +int64_t vmlaldavxq_p(int32x4_t, int32x4_t, mve_pred16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16))) +int64_t vmlaldavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s16))) +int64_t vmlaldavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32))) +int64_t vmlaldavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaldavxq_s32))) +int64_t vmlaldavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16))) +int16x8_t vmlaq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s16))) +int16x8_t vmlaq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32))) +int32x4_t vmlaq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s32))) +int32x4_t vmlaq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8))) +int8x16_t vmlaq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_s8))) +int8x16_t vmlaq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16))) +uint16x8_t vmlaq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u16))) +uint16x8_t vmlaq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32))) +uint32x4_t vmlaq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u32))) +uint32x4_t vmlaq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8))) +uint8x16_t vmlaq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_m_n_u8))) +uint8x16_t vmlaq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16))) +int16x8_t vmlaq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s16))) +int16x8_t vmlaq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32))) +int32x4_t vmlaq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s32))) +int32x4_t vmlaq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8))) +int8x16_t vmlaq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_s8))) 
+int8x16_t vmlaq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16))) +uint16x8_t vmlaq_n_u16(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u16))) +uint16x8_t vmlaq(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32))) +uint32x4_t vmlaq_n_u32(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u32))) +uint32x4_t vmlaq(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8))) +uint8x16_t vmlaq_n_u8(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlaq_n_u8))) +uint8x16_t vmlaq(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16))) +int16x8_t vmlasq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s16))) +int16x8_t vmlasq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32))) +int32x4_t vmlasq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s32))) +int32x4_t vmlasq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8))) +int8x16_t vmlasq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_s8))) +int8x16_t vmlasq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16))) +uint16x8_t vmlasq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u16))) +uint16x8_t vmlasq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32))) +uint32x4_t vmlasq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u32))) +uint32x4_t vmlasq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8))) +uint8x16_t vmlasq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_m_n_u8))) +uint8x16_t vmlasq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16))) +int16x8_t vmlasq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s16))) +int16x8_t vmlasq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32))) +int32x4_t vmlasq_n_s32(int32x4_t, int32x4_t, int32_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s32))) +int32x4_t vmlasq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8))) +int8x16_t vmlasq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_s8))) +int8x16_t vmlasq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16))) +uint16x8_t vmlasq_n_u16(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u16))) +uint16x8_t vmlasq(uint16x8_t, uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32))) +uint32x4_t vmlasq_n_u32(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u32))) +uint32x4_t vmlasq(uint32x4_t, uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8))) +uint8x16_t vmlasq_n_u8(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlasq_n_u8))) +uint8x16_t vmlasq(uint8x16_t, uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16))) +int32_t vmlsdavaq_p_s16(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s16))) +int32_t vmlsdavaq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32))) +int32_t vmlsdavaq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s32))) +int32_t vmlsdavaq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8))) +int32_t vmlsdavaq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_p_s8))) +int32_t vmlsdavaq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16))) +int32_t vmlsdavaq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s16))) +int32_t vmlsdavaq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32))) +int32_t vmlsdavaq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s32))) +int32_t vmlsdavaq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8))) +int32_t vmlsdavaq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaq_s8))) +int32_t vmlsdavaq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16))) +int32_t vmlsdavaxq_p_s16(int32_t, int16x8_t, 
int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s16))) +int32_t vmlsdavaxq_p(int32_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32))) +int32_t vmlsdavaxq_p_s32(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s32))) +int32_t vmlsdavaxq_p(int32_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8))) +int32_t vmlsdavaxq_p_s8(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_p_s8))) +int32_t vmlsdavaxq_p(int32_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16))) +int32_t vmlsdavaxq_s16(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s16))) +int32_t vmlsdavaxq(int32_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32))) +int32_t vmlsdavaxq_s32(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s32))) +int32_t vmlsdavaxq(int32_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8))) +int32_t vmlsdavaxq_s8(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavaxq_s8))) +int32_t vmlsdavaxq(int32_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16))) +int32_t vmlsdavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s16))) +int32_t vmlsdavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32))) +int32_t vmlsdavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s32))) +int32_t vmlsdavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8))) +int32_t vmlsdavq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_p_s8))) +int32_t vmlsdavq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16))) +int32_t vmlsdavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s16))) +int32_t vmlsdavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32))) +int32_t vmlsdavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s32))) +int32_t vmlsdavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8))) +int32_t 
vmlsdavq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavq_s8))) +int32_t vmlsdavq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16))) +int32_t vmlsdavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s16))) +int32_t vmlsdavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32))) +int32_t vmlsdavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s32))) +int32_t vmlsdavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8))) +int32_t vmlsdavxq_p_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_p_s8))) +int32_t vmlsdavxq_p(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16))) +int32_t vmlsdavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s16))) +int32_t vmlsdavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32))) +int32_t vmlsdavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s32))) +int32_t vmlsdavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8))) +int32_t vmlsdavxq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsdavxq_s8))) +int32_t vmlsdavxq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16))) +int64_t vmlsldavaq_p_s16(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s16))) +int64_t vmlsldavaq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32))) +int64_t vmlsldavaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_p_s32))) +int64_t vmlsldavaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16))) +int64_t vmlsldavaq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s16))) +int64_t vmlsldavaq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32))) +int64_t vmlsldavaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaq_s32))) +int64_t vmlsldavaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16))) +int64_t vmlsldavaxq_p_s16(int64_t, int16x8_t, 
int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s16))) +int64_t vmlsldavaxq_p(int64_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32))) +int64_t vmlsldavaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_p_s32))) +int64_t vmlsldavaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16))) +int64_t vmlsldavaxq_s16(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s16))) +int64_t vmlsldavaxq(int64_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32))) +int64_t vmlsldavaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavaxq_s32))) +int64_t vmlsldavaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16))) +int64_t vmlsldavq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s16))) +int64_t vmlsldavq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32))) +int64_t vmlsldavq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_p_s32))) +int64_t vmlsldavq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16))) +int64_t vmlsldavq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s16))) +int64_t vmlsldavq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32))) +int64_t vmlsldavq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavq_s32))) +int64_t vmlsldavq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16))) +int64_t vmlsldavxq_p_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s16))) +int64_t vmlsldavxq_p(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32))) +int64_t vmlsldavxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_p_s32))) +int64_t vmlsldavxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16))) +int64_t vmlsldavxq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s16))) +int64_t vmlsldavxq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32))) +int64_t 
vmlsldavxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmlsldavxq_s32))) +int64_t vmlsldavxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16))) +int32x4_t vmovlbq_m_s16(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s16))) +int32x4_t vmovlbq_m(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8))) +int16x8_t vmovlbq_m_s8(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_s8))) +int16x8_t vmovlbq_m(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16))) +uint32x4_t vmovlbq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u16))) +uint32x4_t vmovlbq_m(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8))) +uint16x8_t vmovlbq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_m_u8))) +uint16x8_t vmovlbq_m(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16))) +int32x4_t vmovlbq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s16))) +int32x4_t vmovlbq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8))) +int16x8_t vmovlbq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_s8))) +int16x8_t vmovlbq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16))) +uint32x4_t vmovlbq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u16))) +uint32x4_t vmovlbq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8))) +uint16x8_t vmovlbq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_u8))) +uint16x8_t vmovlbq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16))) +int32x4_t vmovlbq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s16))) +int32x4_t vmovlbq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8))) +int16x8_t vmovlbq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_s8))) +int16x8_t vmovlbq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16))) +uint32x4_t vmovlbq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u16))) +uint32x4_t vmovlbq_x(uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8))) +uint16x8_t vmovlbq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovlbq_x_u8))) +uint16x8_t vmovlbq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16))) +int32x4_t vmovltq_m_s16(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s16))) +int32x4_t vmovltq_m(int32x4_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8))) +int16x8_t vmovltq_m_s8(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_s8))) +int16x8_t vmovltq_m(int16x8_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16))) +uint32x4_t vmovltq_m_u16(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u16))) +uint32x4_t vmovltq_m(uint32x4_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8))) +uint16x8_t vmovltq_m_u8(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_m_u8))) +uint16x8_t vmovltq_m(uint16x8_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16))) +int32x4_t vmovltq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s16))) +int32x4_t vmovltq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8))) +int16x8_t vmovltq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_s8))) +int16x8_t vmovltq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16))) +uint32x4_t vmovltq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u16))) +uint32x4_t vmovltq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8))) +uint16x8_t vmovltq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_u8))) +uint16x8_t vmovltq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16))) +int32x4_t vmovltq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s16))) +int32x4_t vmovltq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8))) +int16x8_t vmovltq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_s8))) +int16x8_t vmovltq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16))) +uint32x4_t vmovltq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u16))) +uint32x4_t vmovltq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8))) +uint16x8_t vmovltq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovltq_x_u8))) +uint16x8_t vmovltq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16))) +int8x16_t vmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s16))) +int8x16_t vmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32))) +int16x8_t vmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_s32))) +int16x8_t vmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16))) +uint8x16_t vmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u16))) +uint8x16_t vmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32))) +uint16x8_t vmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_m_u32))) +uint16x8_t vmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16))) +int8x16_t vmovnbq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s16))) +int8x16_t vmovnbq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32))) +int16x8_t vmovnbq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_s32))) +int16x8_t vmovnbq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16))) +uint8x16_t vmovnbq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u16))) +uint8x16_t vmovnbq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32))) +uint16x8_t vmovnbq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovnbq_u32))) +uint16x8_t vmovnbq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16))) +int8x16_t vmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s16))) +int8x16_t vmovntq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32))) +int16x8_t vmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_s32))) +int16x8_t vmovntq_m(int16x8_t, 
int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16))) +uint8x16_t vmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u16))) +uint8x16_t vmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32))) +uint16x8_t vmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_m_u32))) +uint16x8_t vmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16))) +int8x16_t vmovntq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s16))) +int8x16_t vmovntq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32))) +int16x8_t vmovntq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_s32))) +int16x8_t vmovntq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16))) +uint8x16_t vmovntq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u16))) +uint8x16_t vmovntq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32))) +uint16x8_t vmovntq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmovntq_u32))) +uint16x8_t vmovntq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16))) +int16x8_t vmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s16))) +int16x8_t vmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32))) +int32x4_t vmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s32))) +int32x4_t vmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8))) +int8x16_t vmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_s8))) +int8x16_t vmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16))) +uint16x8_t vmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u16))) +uint16x8_t vmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32))) +uint32x4_t vmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u32))) 
+uint32x4_t vmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8))) +uint8x16_t vmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_m_u8))) +uint8x16_t vmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16))) +int16x8_t vmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s16))) +int16x8_t vmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32))) +int32x4_t vmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s32))) +int32x4_t vmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8))) +int8x16_t vmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_s8))) +int8x16_t vmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16))) +uint16x8_t vmulhq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u16))) +uint16x8_t vmulhq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32))) +uint32x4_t vmulhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u32))) +uint32x4_t vmulhq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8))) +uint8x16_t vmulhq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_u8))) +uint8x16_t vmulhq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16))) +int16x8_t vmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s16))) +int16x8_t vmulhq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32))) +int32x4_t vmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s32))) +int32x4_t vmulhq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8))) +int8x16_t vmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_s8))) +int8x16_t vmulhq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16))) +uint16x8_t vmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u16))) +uint16x8_t vmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32))) +uint32x4_t vmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u32))) +uint32x4_t vmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8))) +uint8x16_t vmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulhq_x_u8))) +uint8x16_t vmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16))) +int32x4_t vmullbq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s16))) +int32x4_t vmullbq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32))) +int64x2_t vmullbq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s32))) +int64x2_t vmullbq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8))) +int16x8_t vmullbq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_s8))) +int16x8_t vmullbq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16))) +uint32x4_t vmullbq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u16))) +uint32x4_t vmullbq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32))) +uint64x2_t vmullbq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u32))) +uint64x2_t vmullbq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8))) +uint16x8_t vmullbq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_m_u8))) +uint16x8_t vmullbq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16))) +int32x4_t vmullbq_int_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s16))) +int32x4_t vmullbq_int(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32))) +int64x2_t vmullbq_int_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s32))) +int64x2_t vmullbq_int(int32x4_t, int32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8))) +int16x8_t vmullbq_int_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_s8))) +int16x8_t vmullbq_int(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16))) +uint32x4_t vmullbq_int_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u16))) +uint32x4_t vmullbq_int(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32))) +uint64x2_t vmullbq_int_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u32))) +uint64x2_t vmullbq_int(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8))) +uint16x8_t vmullbq_int_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_u8))) +uint16x8_t vmullbq_int(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16))) +int32x4_t vmullbq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s16))) +int32x4_t vmullbq_int_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32))) +int64x2_t vmullbq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s32))) +int64x2_t vmullbq_int_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8))) +int16x8_t vmullbq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_s8))) +int16x8_t vmullbq_int_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16))) +uint32x4_t vmullbq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u16))) +uint32x4_t vmullbq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32))) +uint64x2_t vmullbq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u32))) +uint64x2_t vmullbq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8))) +uint16x8_t vmullbq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_int_x_u8))) +uint16x8_t vmullbq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16))) +uint32x4_t vmullbq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p16))) +uint32x4_t vmullbq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8))) +uint16x8_t vmullbq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_m_p8))) +uint16x8_t vmullbq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16))) +uint32x4_t vmullbq_poly_p16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p16))) +uint32x4_t vmullbq_poly(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8))) +uint16x8_t vmullbq_poly_p8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_p8))) +uint16x8_t vmullbq_poly(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16))) +uint32x4_t vmullbq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p16))) +uint32x4_t vmullbq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8))) +uint16x8_t vmullbq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmullbq_poly_x_p8))) +uint16x8_t vmullbq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16))) +int32x4_t vmulltq_int_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s16))) +int32x4_t vmulltq_int_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32))) +int64x2_t vmulltq_int_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s32))) +int64x2_t vmulltq_int_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8))) +int16x8_t vmulltq_int_m_s8(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_s8))) +int16x8_t vmulltq_int_m(int16x8_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16))) +uint32x4_t vmulltq_int_m_u16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u16))) +uint32x4_t vmulltq_int_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32))) +uint64x2_t vmulltq_int_m_u32(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u32))) +uint64x2_t vmulltq_int_m(uint64x2_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8))) +uint16x8_t vmulltq_int_m_u8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_m_u8))) +uint16x8_t vmulltq_int_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16))) +int32x4_t vmulltq_int_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s16))) +int32x4_t vmulltq_int(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32))) +int64x2_t vmulltq_int_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s32))) +int64x2_t vmulltq_int(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8))) +int16x8_t vmulltq_int_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_s8))) +int16x8_t vmulltq_int(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16))) +uint32x4_t vmulltq_int_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u16))) +uint32x4_t vmulltq_int(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32))) +uint64x2_t vmulltq_int_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u32))) +uint64x2_t vmulltq_int(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8))) +uint16x8_t vmulltq_int_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_u8))) +uint16x8_t vmulltq_int(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16))) +int32x4_t vmulltq_int_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s16))) +int32x4_t vmulltq_int_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32))) +int64x2_t vmulltq_int_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s32))) +int64x2_t vmulltq_int_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8))) +int16x8_t vmulltq_int_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_s8))) +int16x8_t vmulltq_int_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16))) +uint32x4_t 
vmulltq_int_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u16))) +uint32x4_t vmulltq_int_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32))) +uint64x2_t vmulltq_int_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u32))) +uint64x2_t vmulltq_int_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8))) +uint16x8_t vmulltq_int_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_int_x_u8))) +uint16x8_t vmulltq_int_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16))) +uint32x4_t vmulltq_poly_m_p16(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p16))) +uint32x4_t vmulltq_poly_m(uint32x4_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8))) +uint16x8_t vmulltq_poly_m_p8(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_m_p8))) +uint16x8_t vmulltq_poly_m(uint16x8_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16))) +uint32x4_t vmulltq_poly_p16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p16))) +uint32x4_t vmulltq_poly(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8))) +uint16x8_t vmulltq_poly_p8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_p8))) +uint16x8_t vmulltq_poly(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16))) +uint32x4_t vmulltq_poly_x_p16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p16))) +uint32x4_t vmulltq_poly_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8))) +uint16x8_t vmulltq_poly_x_p8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulltq_poly_x_p8))) +uint16x8_t vmulltq_poly_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16))) +int16x8_t vmulq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s16))) +int16x8_t vmulq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32))) +int32x4_t vmulq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s32))) +int32x4_t vmulq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8))) +int8x16_t vmulq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_s8))) +int8x16_t vmulq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16))) +uint16x8_t vmulq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u16))) +uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32))) +uint32x4_t vmulq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u32))) +uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8))) +uint8x16_t vmulq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_u8))) +uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16))) +int16x8_t vmulq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s16))) +int16x8_t vmulq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32))) +int32x4_t vmulq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s32))) +int32x4_t vmulq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8))) +int8x16_t vmulq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_s8))) +int8x16_t vmulq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16))) +uint16x8_t vmulq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u16))) +uint16x8_t vmulq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32))) +uint32x4_t vmulq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u32))) +uint32x4_t vmulq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8))) +uint8x16_t vmulq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_u8))) +uint8x16_t vmulq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16))) +int16x8_t vmulq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s16))) +int16x8_t vmulq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32))) +int32x4_t vmulq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s32))) +int32x4_t vmulq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8))) +int8x16_t vmulq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_s8))) +int8x16_t vmulq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16))) +uint16x8_t vmulq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u16))) +uint16x8_t vmulq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32))) +uint32x4_t vmulq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u32))) +uint32x4_t vmulq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8))) +uint8x16_t vmulq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_u8))) +uint8x16_t vmulq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16))) +int16x8_t vmulq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s16))) +int16x8_t vmulq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32))) +int32x4_t vmulq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s32))) +int32x4_t vmulq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8))) +int8x16_t vmulq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_s8))) +int8x16_t vmulq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16))) +uint16x8_t vmulq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u16))) +uint16x8_t vmulq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32))) +uint32x4_t vmulq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u32))) +uint32x4_t vmulq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8))) +uint8x16_t vmulq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_u8))) +uint8x16_t 
vmulq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16))) +int16x8_t vmulq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s16))) +int16x8_t vmulq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32))) +int32x4_t vmulq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s32))) +int32x4_t vmulq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8))) +int8x16_t vmulq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_s8))) +int8x16_t vmulq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16))) +uint16x8_t vmulq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u16))) +uint16x8_t vmulq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32))) +uint32x4_t vmulq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u32))) +uint32x4_t vmulq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8))) +uint8x16_t vmulq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_u8))) +uint8x16_t vmulq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16))) +int16x8_t vmulq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s16))) +int16x8_t vmulq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32))) +int32x4_t vmulq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s32))) +int32x4_t vmulq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8))) +int8x16_t vmulq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_s8))) +int8x16_t vmulq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16))) +uint16x8_t vmulq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u16))) +uint16x8_t vmulq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32))) +uint32x4_t vmulq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u32))) +uint32x4_t vmulq_x(uint32x4_t, 
uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8))) +uint8x16_t vmulq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_u8))) +uint8x16_t vmulq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16))) +int16x8_t vmvnq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s16))) +int16x8_t vmvnq_m(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32))) +int32x4_t vmvnq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_s32))) +int32x4_t vmvnq_m(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16))) +uint16x8_t vmvnq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u16))) +uint16x8_t vmvnq_m(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32))) +uint32x4_t vmvnq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_n_u32))) +uint32x4_t vmvnq_m(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16))) +int16x8_t vmvnq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s16))) +int16x8_t vmvnq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32))) +int32x4_t vmvnq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s32))) +int32x4_t vmvnq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8))) +int8x16_t vmvnq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_s8))) +int8x16_t vmvnq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16))) +uint16x8_t vmvnq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u16))) +uint16x8_t vmvnq_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32))) +uint32x4_t vmvnq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u32))) +uint32x4_t vmvnq_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8))) +uint8x16_t vmvnq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_m_u8))) +uint8x16_t vmvnq_m(uint8x16_t, 
uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s16))) +int16x8_t vmvnq_n_s16(int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_s32))) +int32x4_t vmvnq_n_s32(int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u16))) +uint16x8_t vmvnq_n_u16(uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_n_u32))) +uint32x4_t vmvnq_n_u32(uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16))) +int16x8_t vmvnq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s16))) +int16x8_t vmvnq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32))) +int32x4_t vmvnq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s32))) +int32x4_t vmvnq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8))) +int8x16_t vmvnq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_s8))) +int8x16_t vmvnq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16))) +uint16x8_t vmvnq_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u16))) +uint16x8_t vmvnq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32))) +uint32x4_t vmvnq_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u32))) +uint32x4_t vmvnq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8))) +uint8x16_t vmvnq_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_u8))) +uint8x16_t vmvnq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s16))) +int16x8_t vmvnq_x_n_s16(int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_s32))) +int32x4_t vmvnq_x_n_s32(int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u16))) +uint16x8_t vmvnq_x_n_u16(uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_n_u32))) +uint32x4_t vmvnq_x_n_u32(uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16))) +int16x8_t vmvnq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s16))) +int16x8_t vmvnq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32))) +int32x4_t vmvnq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s32))) +int32x4_t vmvnq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8))) +int8x16_t vmvnq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_s8))) +int8x16_t 
vmvnq_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16))) +uint16x8_t vmvnq_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u16))) +uint16x8_t vmvnq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32))) +uint32x4_t vmvnq_x_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u32))) +uint32x4_t vmvnq_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8))) +uint8x16_t vmvnq_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmvnq_x_u8))) +uint8x16_t vmvnq_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16))) +int16x8_t vnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s16))) +int16x8_t vnegq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32))) +int32x4_t vnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s32))) +int32x4_t vnegq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8))) +int8x16_t vnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_s8))) +int8x16_t vnegq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16))) +int16x8_t vnegq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s16))) +int16x8_t vnegq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32))) +int32x4_t vnegq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s32))) +int32x4_t vnegq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8))) +int8x16_t vnegq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_s8))) +int8x16_t vnegq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16))) +int16x8_t vnegq_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s16))) +int16x8_t vnegq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32))) +int32x4_t vnegq_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s32))) +int32x4_t vnegq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8))) +int8x16_t vnegq_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_s8))) +int8x16_t vnegq_x(int8x16_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16))) +int16x8_t vornq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s16))) +int16x8_t vornq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32))) +int32x4_t vornq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s32))) +int32x4_t vornq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8))) +int8x16_t vornq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_s8))) +int8x16_t vornq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16))) +uint16x8_t vornq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u16))) +uint16x8_t vornq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32))) +uint32x4_t vornq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u32))) +uint32x4_t vornq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8))) +uint8x16_t vornq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_u8))) +uint8x16_t vornq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16))) +int16x8_t vornq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s16))) +int16x8_t vornq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32))) +int32x4_t vornq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s32))) +int32x4_t vornq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8))) +int8x16_t vornq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_s8))) +int8x16_t vornq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16))) +uint16x8_t vornq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u16))) +uint16x8_t vornq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32))) +uint32x4_t vornq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u32))) +uint32x4_t vornq(uint32x4_t, uint32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8))) +uint8x16_t vornq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_u8))) +uint8x16_t vornq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16))) +int16x8_t vornq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s16))) +int16x8_t vornq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32))) +int32x4_t vornq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s32))) +int32x4_t vornq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8))) +int8x16_t vornq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_s8))) +int8x16_t vornq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16))) +uint16x8_t vornq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u16))) +uint16x8_t vornq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32))) +uint32x4_t vornq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u32))) +uint32x4_t vornq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8))) +uint8x16_t vornq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_u8))) +uint8x16_t vornq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16))) +int16x8_t vorrq_m_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s16))) +int16x8_t vorrq_m_n(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32))) +int32x4_t vorrq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_s32))) +int32x4_t vorrq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16))) +uint16x8_t vorrq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u16))) +uint16x8_t vorrq_m_n(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32))) +uint32x4_t vorrq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_n_u32))) +uint32x4_t vorrq_m_n(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16))) +int16x8_t vorrq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s16))) +int16x8_t vorrq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32))) +int32x4_t vorrq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s32))) +int32x4_t vorrq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8))) +int8x16_t vorrq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_s8))) +int8x16_t vorrq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16))) +uint16x8_t vorrq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u16))) +uint16x8_t vorrq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32))) +uint32x4_t vorrq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u32))) +uint32x4_t vorrq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8))) +uint8x16_t vorrq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_u8))) +uint8x16_t vorrq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16))) +int16x8_t vorrq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s16))) +int16x8_t vorrq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32))) +int32x4_t vorrq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_s32))) +int32x4_t vorrq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16))) +uint16x8_t vorrq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u16))) +uint16x8_t vorrq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32))) +uint32x4_t vorrq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_n_u32))) +uint32x4_t vorrq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16))) +int16x8_t vorrq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s16))) +int16x8_t vorrq(int16x8_t, int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32))) +int32x4_t vorrq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s32))) +int32x4_t vorrq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8))) +int8x16_t vorrq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_s8))) +int8x16_t vorrq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16))) +uint16x8_t vorrq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u16))) +uint16x8_t vorrq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32))) +uint32x4_t vorrq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u32))) +uint32x4_t vorrq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8))) +uint8x16_t vorrq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_u8))) +uint8x16_t vorrq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16))) +int16x8_t vorrq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s16))) +int16x8_t vorrq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32))) +int32x4_t vorrq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s32))) +int32x4_t vorrq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8))) +int8x16_t vorrq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_s8))) +int8x16_t vorrq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16))) +uint16x8_t vorrq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u16))) +uint16x8_t vorrq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32))) +uint32x4_t vorrq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u32))) +uint32x4_t vorrq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8))) +uint8x16_t vorrq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_u8))) +uint8x16_t vorrq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpnot))) +mve_pred16_t vpnot(mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16))) +int16x8_t vpselq_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s16))) +int16x8_t vpselq(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32))) +int32x4_t vpselq_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s32))) +int32x4_t vpselq(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64))) +int64x2_t vpselq_s64(int64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s64))) +int64x2_t vpselq(int64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8))) +int8x16_t vpselq_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_s8))) +int8x16_t vpselq(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16))) +uint16x8_t vpselq_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u16))) +uint16x8_t vpselq(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32))) +uint32x4_t vpselq_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u32))) +uint32x4_t vpselq(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64))) +uint64x2_t vpselq_u64(uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u64))) +uint64x2_t vpselq(uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8))) +uint8x16_t vpselq_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_u8))) +uint8x16_t vpselq(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16))) +int16x8_t vqabsq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s16))) +int16x8_t vqabsq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32))) +int32x4_t vqabsq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s32))) +int32x4_t vqabsq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8))) +int8x16_t vqabsq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_m_s8))) +int8x16_t vqabsq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16))) +int16x8_t vqabsq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s16))) +int16x8_t vqabsq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32))) +int32x4_t vqabsq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s32))) +int32x4_t vqabsq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8))) +int8x16_t vqabsq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqabsq_s8))) +int8x16_t vqabsq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16))) +int16x8_t vqaddq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s16))) +int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32))) +int32x4_t vqaddq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s32))) +int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8))) +int8x16_t vqaddq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_s8))) +int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16))) +uint16x8_t vqaddq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u16))) +uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32))) +uint32x4_t vqaddq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u32))) +uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8))) +uint8x16_t vqaddq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_n_u8))) +uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16))) +int16x8_t vqaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s16))) +int16x8_t vqaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32))) +int32x4_t vqaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s32))) +int32x4_t vqaddq_m(int32x4_t, int32x4_t, int32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8))) +int8x16_t vqaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_s8))) +int8x16_t vqaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16))) +uint16x8_t vqaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u16))) +uint16x8_t vqaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32))) +uint32x4_t vqaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u32))) +uint32x4_t vqaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8))) +uint8x16_t vqaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_m_u8))) +uint8x16_t vqaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16))) +int16x8_t vqaddq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s16))) +int16x8_t vqaddq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32))) +int32x4_t vqaddq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s32))) +int32x4_t vqaddq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8))) +int8x16_t vqaddq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_s8))) +int8x16_t vqaddq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16))) +uint16x8_t vqaddq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u16))) +uint16x8_t vqaddq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32))) +uint32x4_t vqaddq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u32))) +uint32x4_t vqaddq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8))) +uint8x16_t vqaddq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_n_u8))) +uint8x16_t vqaddq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16))) +int16x8_t vqaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s16))) +int16x8_t vqaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32))) 
+int32x4_t vqaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s32))) +int32x4_t vqaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8))) +int8x16_t vqaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_s8))) +int8x16_t vqaddq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16))) +uint16x8_t vqaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u16))) +uint16x8_t vqaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32))) +uint32x4_t vqaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u32))) +uint32x4_t vqaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8))) +uint8x16_t vqaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqaddq_u8))) +uint8x16_t vqaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16))) +int16x8_t vqdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s16))) +int16x8_t vqdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32))) +int32x4_t vqdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s32))) +int32x4_t vqdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8))) +int8x16_t vqdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_m_s8))) +int8x16_t vqdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16))) +int16x8_t vqdmladhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s16))) +int16x8_t vqdmladhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32))) +int32x4_t vqdmladhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s32))) +int32x4_t vqdmladhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8))) +int8x16_t vqdmladhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhq_s8))) +int8x16_t vqdmladhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16))) +int16x8_t vqdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s16))) +int16x8_t vqdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32))) +int32x4_t vqdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s32))) +int32x4_t vqdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8))) +int8x16_t vqdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_m_s8))) +int8x16_t vqdmladhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16))) +int16x8_t vqdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s16))) +int16x8_t vqdmladhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32))) +int32x4_t vqdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s32))) +int32x4_t vqdmladhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8))) +int8x16_t vqdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmladhxq_s8))) +int8x16_t vqdmladhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16))) +int16x8_t vqdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s16))) +int16x8_t vqdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32))) +int32x4_t vqdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s32))) +int32x4_t vqdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8))) +int8x16_t vqdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_m_n_s8))) +int8x16_t vqdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16))) +int16x8_t vqdmlahq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s16))) +int16x8_t vqdmlahq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32))) +int32x4_t vqdmlahq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s32))) 
+int32x4_t vqdmlahq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8))) +int8x16_t vqdmlahq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlahq_n_s8))) +int8x16_t vqdmlahq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16))) +int16x8_t vqdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s16))) +int16x8_t vqdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32))) +int32x4_t vqdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s32))) +int32x4_t vqdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8))) +int8x16_t vqdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_m_n_s8))) +int8x16_t vqdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16))) +int16x8_t vqdmlashq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s16))) +int16x8_t vqdmlashq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32))) +int32x4_t vqdmlashq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s32))) +int32x4_t vqdmlashq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8))) +int8x16_t vqdmlashq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlashq_n_s8))) +int8x16_t vqdmlashq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16))) +int16x8_t vqdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s16))) +int16x8_t vqdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32))) +int32x4_t vqdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s32))) +int32x4_t vqdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8))) +int8x16_t vqdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_m_s8))) +int8x16_t vqdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16))) +int16x8_t vqdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s16))) +int16x8_t vqdmlsdhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32))) +int32x4_t vqdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s32))) +int32x4_t vqdmlsdhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8))) +int8x16_t vqdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhq_s8))) +int8x16_t vqdmlsdhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16))) +int16x8_t vqdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s16))) +int16x8_t vqdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32))) +int32x4_t vqdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s32))) +int32x4_t vqdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8))) +int8x16_t vqdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_m_s8))) +int8x16_t vqdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16))) +int16x8_t vqdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s16))) +int16x8_t vqdmlsdhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32))) +int32x4_t vqdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s32))) +int32x4_t vqdmlsdhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8))) +int8x16_t vqdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmlsdhxq_s8))) +int8x16_t vqdmlsdhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16))) +int16x8_t vqdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s16))) +int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32))) +int32x4_t vqdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s32))) +int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8))) +int8x16_t vqdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_n_s8))) +int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16))) +int16x8_t vqdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s16))) +int16x8_t vqdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32))) +int32x4_t vqdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s32))) +int32x4_t vqdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8))) +int8x16_t vqdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_m_s8))) +int8x16_t vqdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16))) +int16x8_t vqdmulhq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s16))) +int16x8_t vqdmulhq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32))) +int32x4_t vqdmulhq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s32))) +int32x4_t vqdmulhq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8))) +int8x16_t vqdmulhq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_n_s8))) +int8x16_t vqdmulhq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16))) +int16x8_t vqdmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s16))) +int16x8_t vqdmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32))) +int32x4_t vqdmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s32))) +int32x4_t vqdmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8))) +int8x16_t vqdmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulhq_s8))) +int8x16_t vqdmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16))) +int32x4_t vqdmullbq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s16))) +int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32))) +int64x2_t vqdmullbq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_n_s32))) +int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16))) +int32x4_t vqdmullbq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s16))) +int32x4_t vqdmullbq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32))) +int64x2_t vqdmullbq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_m_s32))) +int64x2_t vqdmullbq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16))) +int32x4_t vqdmullbq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s16))) +int32x4_t vqdmullbq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32))) +int64x2_t vqdmullbq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_n_s32))) +int64x2_t vqdmullbq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16))) +int32x4_t vqdmullbq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s16))) +int32x4_t vqdmullbq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32))) +int64x2_t vqdmullbq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmullbq_s32))) +int64x2_t vqdmullbq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16))) +int32x4_t vqdmulltq_m_n_s16(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s16))) +int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32))) +int64x2_t vqdmulltq_m_n_s32(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_n_s32))) +int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16))) +int32x4_t vqdmulltq_m_s16(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s16))) +int32x4_t vqdmulltq_m(int32x4_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32))) +int64x2_t vqdmulltq_m_s32(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_m_s32))) +int64x2_t vqdmulltq_m(int64x2_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16))) +int32x4_t vqdmulltq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s16))) +int32x4_t vqdmulltq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32))) +int64x2_t vqdmulltq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_n_s32))) +int64x2_t vqdmulltq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16))) +int32x4_t vqdmulltq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s16))) +int32x4_t vqdmulltq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32))) +int64x2_t vqdmulltq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqdmulltq_s32))) +int64x2_t vqdmulltq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16))) +int8x16_t vqmovnbq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s16))) +int8x16_t vqmovnbq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32))) +int16x8_t vqmovnbq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_s32))) +int16x8_t vqmovnbq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16))) +uint8x16_t vqmovnbq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u16))) +uint8x16_t vqmovnbq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32))) +uint16x8_t vqmovnbq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_m_u32))) +uint16x8_t vqmovnbq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16))) +int8x16_t vqmovnbq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s16))) +int8x16_t vqmovnbq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32))) +int16x8_t vqmovnbq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_s32))) +int16x8_t vqmovnbq(int16x8_t, int32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16))) +uint8x16_t vqmovnbq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u16))) +uint8x16_t vqmovnbq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32))) +uint16x8_t vqmovnbq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovnbq_u32))) +uint16x8_t vqmovnbq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16))) +int8x16_t vqmovntq_m_s16(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s16))) +int8x16_t vqmovntq_m(int8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32))) +int16x8_t vqmovntq_m_s32(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_s32))) +int16x8_t vqmovntq_m(int16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16))) +uint8x16_t vqmovntq_m_u16(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u16))) +uint8x16_t vqmovntq_m(uint8x16_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32))) +uint16x8_t vqmovntq_m_u32(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_m_u32))) +uint16x8_t vqmovntq_m(uint16x8_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16))) +int8x16_t vqmovntq_s16(int8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s16))) +int8x16_t vqmovntq(int8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32))) +int16x8_t vqmovntq_s32(int16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_s32))) +int16x8_t vqmovntq(int16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16))) +uint8x16_t vqmovntq_u16(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u16))) +uint8x16_t vqmovntq(uint8x16_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32))) +uint16x8_t vqmovntq_u32(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovntq_u32))) +uint16x8_t vqmovntq(uint16x8_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16))) +uint8x16_t vqmovunbq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s16))) +uint8x16_t vqmovunbq_m(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32))) +uint16x8_t vqmovunbq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_m_s32))) +uint16x8_t vqmovunbq_m(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16))) +uint8x16_t vqmovunbq_s16(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s16))) +uint8x16_t vqmovunbq(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32))) +uint16x8_t vqmovunbq_s32(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovunbq_s32))) +uint16x8_t vqmovunbq(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16))) +uint8x16_t vqmovuntq_m_s16(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s16))) +uint8x16_t vqmovuntq_m(uint8x16_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32))) +uint16x8_t vqmovuntq_m_s32(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_m_s32))) +uint16x8_t vqmovuntq_m(uint16x8_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16))) +uint8x16_t vqmovuntq_s16(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s16))) +uint8x16_t vqmovuntq(uint8x16_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32))) +uint16x8_t vqmovuntq_s32(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqmovuntq_s32))) +uint16x8_t vqmovuntq(uint16x8_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16))) +int16x8_t vqnegq_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s16))) +int16x8_t vqnegq_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32))) +int32x4_t vqnegq_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s32))) +int32x4_t vqnegq_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8))) +int8x16_t vqnegq_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_m_s8))) +int8x16_t vqnegq_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16))) +int16x8_t vqnegq_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s16))) +int16x8_t vqnegq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32))) +int32x4_t 
vqnegq_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s32))) +int32x4_t vqnegq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8))) +int8x16_t vqnegq_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqnegq_s8))) +int8x16_t vqnegq(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16))) +int16x8_t vqrdmladhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s16))) +int16x8_t vqrdmladhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32))) +int32x4_t vqrdmladhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s32))) +int32x4_t vqrdmladhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8))) +int8x16_t vqrdmladhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_m_s8))) +int8x16_t vqrdmladhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16))) +int16x8_t vqrdmladhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s16))) +int16x8_t vqrdmladhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32))) +int32x4_t vqrdmladhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s32))) +int32x4_t vqrdmladhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8))) +int8x16_t vqrdmladhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhq_s8))) +int8x16_t vqrdmladhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16))) +int16x8_t vqrdmladhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s16))) +int16x8_t vqrdmladhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32))) +int32x4_t vqrdmladhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s32))) +int32x4_t vqrdmladhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8))) +int8x16_t vqrdmladhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_m_s8))) +int8x16_t vqrdmladhxq_m(int8x16_t, int8x16_t, 
int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16))) +int16x8_t vqrdmladhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s16))) +int16x8_t vqrdmladhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32))) +int32x4_t vqrdmladhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s32))) +int32x4_t vqrdmladhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8))) +int8x16_t vqrdmladhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmladhxq_s8))) +int8x16_t vqrdmladhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16))) +int16x8_t vqrdmlahq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s16))) +int16x8_t vqrdmlahq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32))) +int32x4_t vqrdmlahq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s32))) +int32x4_t vqrdmlahq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8))) +int8x16_t vqrdmlahq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_m_n_s8))) +int8x16_t vqrdmlahq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16))) +int16x8_t vqrdmlahq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s16))) +int16x8_t vqrdmlahq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32))) +int32x4_t vqrdmlahq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s32))) +int32x4_t vqrdmlahq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8))) +int8x16_t vqrdmlahq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlahq_n_s8))) +int8x16_t vqrdmlahq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16))) +int16x8_t vqrdmlashq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s16))) +int16x8_t vqrdmlashq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32))) +int32x4_t 
vqrdmlashq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s32))) +int32x4_t vqrdmlashq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8))) +int8x16_t vqrdmlashq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_m_n_s8))) +int8x16_t vqrdmlashq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16))) +int16x8_t vqrdmlashq_n_s16(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s16))) +int16x8_t vqrdmlashq(int16x8_t, int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32))) +int32x4_t vqrdmlashq_n_s32(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s32))) +int32x4_t vqrdmlashq(int32x4_t, int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8))) +int8x16_t vqrdmlashq_n_s8(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlashq_n_s8))) +int8x16_t vqrdmlashq(int8x16_t, int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16))) +int16x8_t vqrdmlsdhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s16))) +int16x8_t vqrdmlsdhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32))) +int32x4_t vqrdmlsdhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s32))) +int32x4_t vqrdmlsdhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8))) +int8x16_t vqrdmlsdhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_m_s8))) +int8x16_t vqrdmlsdhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16))) +int16x8_t vqrdmlsdhq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s16))) +int16x8_t vqrdmlsdhq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32))) +int32x4_t vqrdmlsdhq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s32))) +int32x4_t vqrdmlsdhq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8))) +int8x16_t vqrdmlsdhq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhq_s8))) +int8x16_t vqrdmlsdhq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16))) +int16x8_t vqrdmlsdhxq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s16))) +int16x8_t vqrdmlsdhxq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32))) +int32x4_t vqrdmlsdhxq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s32))) +int32x4_t vqrdmlsdhxq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8))) +int8x16_t vqrdmlsdhxq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_m_s8))) +int8x16_t vqrdmlsdhxq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16))) +int16x8_t vqrdmlsdhxq_s16(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s16))) +int16x8_t vqrdmlsdhxq(int16x8_t, int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32))) +int32x4_t vqrdmlsdhxq_s32(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s32))) +int32x4_t vqrdmlsdhxq(int32x4_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8))) +int8x16_t vqrdmlsdhxq_s8(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmlsdhxq_s8))) +int8x16_t vqrdmlsdhxq(int8x16_t, int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16))) +int16x8_t vqrdmulhq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s16))) +int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32))) +int32x4_t vqrdmulhq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s32))) +int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8))) +int8x16_t vqrdmulhq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_n_s8))) +int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16))) +int16x8_t vqrdmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s16))) +int16x8_t vqrdmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32))) +int32x4_t vqrdmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s32))) +int32x4_t vqrdmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8))) +int8x16_t vqrdmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_m_s8))) +int8x16_t vqrdmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16))) +int16x8_t vqrdmulhq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s16))) +int16x8_t vqrdmulhq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32))) +int32x4_t vqrdmulhq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s32))) +int32x4_t vqrdmulhq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8))) +int8x16_t vqrdmulhq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_n_s8))) +int8x16_t vqrdmulhq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16))) +int16x8_t vqrdmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s16))) +int16x8_t vqrdmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32))) +int32x4_t vqrdmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s32))) +int32x4_t vqrdmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8))) +int8x16_t vqrdmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrdmulhq_s8))) +int8x16_t vqrdmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16))) +int16x8_t vqrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s16))) +int16x8_t vqrshlq_m_n(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32))) +int32x4_t vqrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s32))) +int32x4_t vqrshlq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8))) +int8x16_t vqrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_s8))) +int8x16_t vqrshlq_m_n(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16))) +uint16x8_t vqrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u16))) +uint16x8_t vqrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32))) +uint32x4_t vqrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u32))) +uint32x4_t vqrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8))) +uint8x16_t vqrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_n_u8))) +uint8x16_t vqrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16))) +int16x8_t vqrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s16))) +int16x8_t vqrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32))) +int32x4_t vqrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s32))) +int32x4_t vqrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8))) +int8x16_t vqrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_s8))) +int8x16_t vqrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16))) +uint16x8_t vqrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u16))) +uint16x8_t vqrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32))) +uint32x4_t vqrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u32))) +uint32x4_t vqrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8))) +uint8x16_t vqrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_m_u8))) +uint8x16_t vqrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16))) +int16x8_t vqrshlq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s16))) +int16x8_t vqrshlq(int16x8_t, int32_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32))) +int32x4_t vqrshlq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s32))) +int32x4_t vqrshlq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8))) +int8x16_t vqrshlq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_s8))) +int8x16_t vqrshlq(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16))) +uint16x8_t vqrshlq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u16))) +uint16x8_t vqrshlq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32))) +uint32x4_t vqrshlq_n_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u32))) +uint32x4_t vqrshlq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8))) +uint8x16_t vqrshlq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_n_u8))) +uint8x16_t vqrshlq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16))) +int16x8_t vqrshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s16))) +int16x8_t vqrshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32))) +int32x4_t vqrshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s32))) +int32x4_t vqrshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8))) +int8x16_t vqrshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_s8))) +int8x16_t vqrshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16))) +uint16x8_t vqrshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u16))) +uint16x8_t vqrshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32))) +uint32x4_t vqrshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u32))) +uint32x4_t vqrshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8))) +uint8x16_t vqrshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshlq_u8))) +uint8x16_t vqrshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16))) +int8x16_t vqrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s16))) +int8x16_t vqrshrnbq_m(int8x16_t, int16x8_t, int, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32))) +int16x8_t vqrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_s32))) +int16x8_t vqrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16))) +uint8x16_t vqrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u16))) +uint8x16_t vqrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32))) +uint16x8_t vqrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_m_n_u32))) +uint16x8_t vqrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16))) +int8x16_t vqrshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s16))) +int8x16_t vqrshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32))) +int16x8_t vqrshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_s32))) +int16x8_t vqrshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16))) +uint8x16_t vqrshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u16))) +uint8x16_t vqrshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32))) +uint16x8_t vqrshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrnbq_n_u32))) +uint16x8_t vqrshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16))) +int8x16_t vqrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s16))) +int8x16_t vqrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32))) +int16x8_t vqrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_s32))) +int16x8_t vqrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16))) +uint8x16_t vqrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u16))) +uint8x16_t vqrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32))) +uint16x8_t 
vqrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_m_n_u32))) +uint16x8_t vqrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16))) +int8x16_t vqrshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s16))) +int8x16_t vqrshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32))) +int16x8_t vqrshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_s32))) +int16x8_t vqrshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16))) +uint8x16_t vqrshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u16))) +uint8x16_t vqrshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32))) +uint16x8_t vqrshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrntq_n_u32))) +uint16x8_t vqrshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16))) +uint8x16_t vqrshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s16))) +uint8x16_t vqrshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32))) +uint16x8_t vqrshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_m_n_s32))) +uint16x8_t vqrshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16))) +uint8x16_t vqrshrunbq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s16))) +uint8x16_t vqrshrunbq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32))) +uint16x8_t vqrshrunbq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshrunbq_n_s32))) +uint16x8_t vqrshrunbq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16))) +uint8x16_t vqrshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s16))) +uint8x16_t vqrshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32))) +uint16x8_t vqrshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_m_n_s32))) +uint16x8_t 
vqrshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16))) +uint8x16_t vqrshruntq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s16))) +uint8x16_t vqrshruntq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32))) +uint16x8_t vqrshruntq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqrshruntq_n_s32))) +uint16x8_t vqrshruntq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16))) +int16x8_t vqshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s16))) +int16x8_t vqshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32))) +int32x4_t vqshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s32))) +int32x4_t vqshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8))) +int8x16_t vqshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_s8))) +int8x16_t vqshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16))) +uint16x8_t vqshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u16))) +uint16x8_t vqshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32))) +uint32x4_t vqshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u32))) +uint32x4_t vqshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8))) +uint8x16_t vqshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_n_u8))) +uint8x16_t vqshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16))) +int16x8_t vqshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s16))) +int16x8_t vqshlq_m_r(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32))) +int32x4_t vqshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s32))) +int32x4_t vqshlq_m_r(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8))) +int8x16_t vqshlq_m_r_s8(int8x16_t, 
int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_s8))) +int8x16_t vqshlq_m_r(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16))) +uint16x8_t vqshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u16))) +uint16x8_t vqshlq_m_r(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32))) +uint32x4_t vqshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u32))) +uint32x4_t vqshlq_m_r(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8))) +uint8x16_t vqshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_r_u8))) +uint8x16_t vqshlq_m_r(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16))) +int16x8_t vqshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s16))) +int16x8_t vqshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32))) +int32x4_t vqshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s32))) +int32x4_t vqshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8))) +int8x16_t vqshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_s8))) +int8x16_t vqshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16))) +uint16x8_t vqshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u16))) +uint16x8_t vqshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32))) +uint32x4_t vqshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u32))) +uint32x4_t vqshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8))) +uint8x16_t vqshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_m_u8))) +uint8x16_t vqshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16))) +int16x8_t vqshlq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s16))) +int16x8_t vqshlq_n(int16x8_t, 
int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32))) +int32x4_t vqshlq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s32))) +int32x4_t vqshlq_n(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8))) +int8x16_t vqshlq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_s8))) +int8x16_t vqshlq_n(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16))) +uint16x8_t vqshlq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u16))) +uint16x8_t vqshlq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32))) +uint32x4_t vqshlq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u32))) +uint32x4_t vqshlq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8))) +uint8x16_t vqshlq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_n_u8))) +uint8x16_t vqshlq_n(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16))) +int16x8_t vqshlq_r_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s16))) +int16x8_t vqshlq_r(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32))) +int32x4_t vqshlq_r_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s32))) +int32x4_t vqshlq_r(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8))) +int8x16_t vqshlq_r_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_s8))) +int8x16_t vqshlq_r(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16))) +uint16x8_t vqshlq_r_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u16))) +uint16x8_t vqshlq_r(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32))) +uint32x4_t vqshlq_r_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u32))) +uint32x4_t vqshlq_r(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8))) +uint8x16_t vqshlq_r_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_r_u8))) +uint8x16_t vqshlq_r(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16))) +int16x8_t vqshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s16))) +int16x8_t vqshlq(int16x8_t, int16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32))) +int32x4_t vqshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s32))) +int32x4_t vqshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8))) +int8x16_t vqshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_s8))) +int8x16_t vqshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16))) +uint16x8_t vqshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u16))) +uint16x8_t vqshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32))) +uint32x4_t vqshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u32))) +uint32x4_t vqshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8))) +uint8x16_t vqshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshlq_u8))) +uint8x16_t vqshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16))) +uint16x8_t vqshluq_m_n_s16(uint16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s16))) +uint16x8_t vqshluq_m(uint16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32))) +uint32x4_t vqshluq_m_n_s32(uint32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s32))) +uint32x4_t vqshluq_m(uint32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8))) +uint8x16_t vqshluq_m_n_s8(uint8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_m_n_s8))) +uint8x16_t vqshluq_m(uint8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16))) +uint16x8_t vqshluq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s16))) +uint16x8_t vqshluq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32))) +uint32x4_t vqshluq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s32))) +uint32x4_t vqshluq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8))) +uint8x16_t vqshluq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshluq_n_s8))) +uint8x16_t vqshluq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16))) +int8x16_t vqshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s16))) +int8x16_t vqshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32))) +int16x8_t vqshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_s32))) +int16x8_t vqshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16))) +uint8x16_t vqshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u16))) +uint8x16_t vqshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32))) +uint16x8_t vqshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_m_n_u32))) +uint16x8_t vqshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16))) +int8x16_t vqshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s16))) +int8x16_t vqshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32))) +int16x8_t vqshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_s32))) +int16x8_t vqshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16))) +uint8x16_t vqshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u16))) +uint8x16_t vqshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32))) +uint16x8_t vqshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrnbq_n_u32))) +uint16x8_t vqshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16))) +int8x16_t vqshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s16))) +int8x16_t vqshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32))) +int16x8_t vqshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_s32))) +int16x8_t vqshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16))) +uint8x16_t vqshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u16))) +uint8x16_t vqshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32))) +uint16x8_t vqshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_m_n_u32))) +uint16x8_t vqshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16))) +int8x16_t vqshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s16))) +int8x16_t vqshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32))) +int16x8_t vqshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_s32))) +int16x8_t vqshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16))) +uint8x16_t vqshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u16))) +uint8x16_t vqshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32))) +uint16x8_t vqshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrntq_n_u32))) +uint16x8_t vqshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16))) +uint8x16_t vqshrunbq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s16))) +uint8x16_t vqshrunbq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32))) +uint16x8_t vqshrunbq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_m_n_s32))) +uint16x8_t vqshrunbq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16))) +uint8x16_t vqshrunbq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s16))) +uint8x16_t vqshrunbq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32))) +uint16x8_t vqshrunbq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshrunbq_n_s32))) +uint16x8_t vqshrunbq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16))) +uint8x16_t vqshruntq_m_n_s16(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s16))) +uint8x16_t vqshruntq_m(uint8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32))) +uint16x8_t vqshruntq_m_n_s32(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_m_n_s32))) +uint16x8_t vqshruntq_m(uint16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16))) +uint8x16_t vqshruntq_n_s16(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s16))) +uint8x16_t vqshruntq(uint8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32))) +uint16x8_t vqshruntq_n_s32(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqshruntq_n_s32))) +uint16x8_t vqshruntq(uint16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16))) +int16x8_t vqsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s16))) +int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32))) +int32x4_t vqsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s32))) +int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8))) +int8x16_t vqsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_s8))) +int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16))) +uint16x8_t vqsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u16))) +uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32))) +uint32x4_t vqsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u32))) +uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8))) +uint8x16_t vqsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_n_u8))) +uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16))) +int16x8_t vqsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s16))) +int16x8_t vqsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32))) +int32x4_t vqsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s32))) +int32x4_t vqsubq_m(int32x4_t, int32x4_t, int32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8))) +int8x16_t vqsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_s8))) +int8x16_t vqsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16))) +uint16x8_t vqsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u16))) +uint16x8_t vqsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32))) +uint32x4_t vqsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u32))) +uint32x4_t vqsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8))) +uint8x16_t vqsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_m_u8))) +uint8x16_t vqsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16))) +int16x8_t vqsubq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s16))) +int16x8_t vqsubq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32))) +int32x4_t vqsubq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s32))) +int32x4_t vqsubq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8))) +int8x16_t vqsubq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_s8))) +int8x16_t vqsubq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16))) +uint16x8_t vqsubq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u16))) +uint16x8_t vqsubq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32))) +uint32x4_t vqsubq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u32))) +uint32x4_t vqsubq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8))) +uint8x16_t vqsubq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_n_u8))) +uint8x16_t vqsubq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16))) +int16x8_t vqsubq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s16))) +int16x8_t vqsubq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32))) 
+int32x4_t vqsubq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s32))) +int32x4_t vqsubq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8))) +int8x16_t vqsubq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_s8))) +int8x16_t vqsubq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16))) +uint16x8_t vqsubq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u16))) +uint16x8_t vqsubq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32))) +uint32x4_t vqsubq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u32))) +uint32x4_t vqsubq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8))) +uint8x16_t vqsubq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vqsubq_u8))) +uint8x16_t vqsubq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32))) +int16x8_t vreinterpretq_s16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s32))) +int16x8_t vreinterpretq_s16(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64))) +int16x8_t vreinterpretq_s16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s64))) +int16x8_t vreinterpretq_s16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8))) +int16x8_t vreinterpretq_s16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_s8))) +int16x8_t vreinterpretq_s16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16))) +int16x8_t vreinterpretq_s16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u16))) +int16x8_t vreinterpretq_s16(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32))) +int16x8_t vreinterpretq_s16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u32))) +int16x8_t vreinterpretq_s16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64))) +int16x8_t vreinterpretq_s16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u64))) +int16x8_t vreinterpretq_s16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8))) +int16x8_t vreinterpretq_s16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8))) +int16x8_t vreinterpretq_s16(uint8x16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16))) +int32x4_t vreinterpretq_s32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s16))) +int32x4_t vreinterpretq_s32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64))) +int32x4_t vreinterpretq_s32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s64))) +int32x4_t vreinterpretq_s32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8))) +int32x4_t vreinterpretq_s32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_s8))) +int32x4_t vreinterpretq_s32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16))) +int32x4_t vreinterpretq_s32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u16))) +int32x4_t vreinterpretq_s32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32))) +int32x4_t vreinterpretq_s32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u32))) +int32x4_t vreinterpretq_s32(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64))) +int32x4_t vreinterpretq_s32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u64))) +int32x4_t vreinterpretq_s32(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8))) +int32x4_t vreinterpretq_s32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8))) +int32x4_t vreinterpretq_s32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16))) +int64x2_t vreinterpretq_s64_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s16))) +int64x2_t vreinterpretq_s64(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32))) +int64x2_t vreinterpretq_s64_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s32))) +int64x2_t vreinterpretq_s64(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8))) +int64x2_t vreinterpretq_s64_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_s8))) +int64x2_t vreinterpretq_s64(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16))) +int64x2_t vreinterpretq_s64_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u16))) +int64x2_t vreinterpretq_s64(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32))) +int64x2_t vreinterpretq_s64_u32(uint32x4_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u32))) +int64x2_t vreinterpretq_s64(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64))) +int64x2_t vreinterpretq_s64_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u64))) +int64x2_t vreinterpretq_s64(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8))) +int64x2_t vreinterpretq_s64_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8))) +int64x2_t vreinterpretq_s64(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16))) +int8x16_t vreinterpretq_s8_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s16))) +int8x16_t vreinterpretq_s8(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32))) +int8x16_t vreinterpretq_s8_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s32))) +int8x16_t vreinterpretq_s8(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64))) +int8x16_t vreinterpretq_s8_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_s64))) +int8x16_t vreinterpretq_s8(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16))) +int8x16_t vreinterpretq_s8_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u16))) +int8x16_t vreinterpretq_s8(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32))) +int8x16_t vreinterpretq_s8_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u32))) +int8x16_t vreinterpretq_s8(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64))) +int8x16_t vreinterpretq_s8_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u64))) +int8x16_t vreinterpretq_s8(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8))) +int8x16_t vreinterpretq_s8_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8))) +int8x16_t vreinterpretq_s8(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16))) +uint16x8_t vreinterpretq_u16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s16))) +uint16x8_t vreinterpretq_u16(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32))) +uint16x8_t vreinterpretq_u16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s32))) +uint16x8_t vreinterpretq_u16(int32x4_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64))) +uint16x8_t vreinterpretq_u16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s64))) +uint16x8_t vreinterpretq_u16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8))) +uint16x8_t vreinterpretq_u16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_s8))) +uint16x8_t vreinterpretq_u16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32))) +uint16x8_t vreinterpretq_u16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u32))) +uint16x8_t vreinterpretq_u16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64))) +uint16x8_t vreinterpretq_u16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u64))) +uint16x8_t vreinterpretq_u16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8))) +uint16x8_t vreinterpretq_u16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8))) +uint16x8_t vreinterpretq_u16(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16))) +uint32x4_t vreinterpretq_u32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s16))) +uint32x4_t vreinterpretq_u32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32))) +uint32x4_t vreinterpretq_u32_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s32))) +uint32x4_t vreinterpretq_u32(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64))) +uint32x4_t vreinterpretq_u32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s64))) +uint32x4_t vreinterpretq_u32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8))) +uint32x4_t vreinterpretq_u32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_s8))) +uint32x4_t vreinterpretq_u32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16))) +uint32x4_t vreinterpretq_u32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u16))) +uint32x4_t vreinterpretq_u32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64))) +uint32x4_t vreinterpretq_u32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u64))) +uint32x4_t vreinterpretq_u32(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8))) +uint32x4_t 
vreinterpretq_u32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8))) +uint32x4_t vreinterpretq_u32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16))) +uint64x2_t vreinterpretq_u64_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s16))) +uint64x2_t vreinterpretq_u64(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32))) +uint64x2_t vreinterpretq_u64_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s32))) +uint64x2_t vreinterpretq_u64(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64))) +uint64x2_t vreinterpretq_u64_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s64))) +uint64x2_t vreinterpretq_u64(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8))) +uint64x2_t vreinterpretq_u64_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_s8))) +uint64x2_t vreinterpretq_u64(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16))) +uint64x2_t vreinterpretq_u64_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u16))) +uint64x2_t vreinterpretq_u64(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32))) +uint64x2_t vreinterpretq_u64_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u32))) +uint64x2_t vreinterpretq_u64(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8))) +uint64x2_t vreinterpretq_u64_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8))) +uint64x2_t vreinterpretq_u64(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16))) +uint8x16_t vreinterpretq_u8_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16))) +uint8x16_t vreinterpretq_u8(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32))) +uint8x16_t vreinterpretq_u8_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32))) +uint8x16_t vreinterpretq_u8(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64))) +uint8x16_t vreinterpretq_u8_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64))) +uint8x16_t vreinterpretq_u8(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8))) +uint8x16_t vreinterpretq_u8_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8))) +uint8x16_t vreinterpretq_u8(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16))) +uint8x16_t vreinterpretq_u8_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16))) +uint8x16_t vreinterpretq_u8(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32))) +uint8x16_t vreinterpretq_u8_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32))) +uint8x16_t vreinterpretq_u8(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64))) +uint8x16_t vreinterpretq_u8_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64))) +uint8x16_t vreinterpretq_u8(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8))) +int8x16_t vrev16q_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_s8))) +int8x16_t vrev16q_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8))) +uint8x16_t vrev16q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_m_u8))) +uint8x16_t vrev16q_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8))) +int8x16_t vrev16q_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_s8))) +int8x16_t vrev16q(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8))) +uint8x16_t vrev16q_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_u8))) +uint8x16_t vrev16q(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8))) +int8x16_t vrev16q_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_s8))) +int8x16_t vrev16q_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8))) +uint8x16_t vrev16q_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev16q_x_u8))) +uint8x16_t vrev16q_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16))) +int16x8_t vrev32q_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s16))) +int16x8_t vrev32q_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8))) +int8x16_t vrev32q_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_s8))) +int8x16_t vrev32q_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16))) +uint16x8_t vrev32q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u16))) +uint16x8_t vrev32q_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8))) +uint8x16_t vrev32q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_u8))) +uint8x16_t vrev32q_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16))) +int16x8_t vrev32q_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s16))) +int16x8_t vrev32q(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8))) +int8x16_t vrev32q_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_s8))) +int8x16_t vrev32q(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16))) +uint16x8_t vrev32q_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u16))) +uint16x8_t vrev32q(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8))) +uint8x16_t vrev32q_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_u8))) +uint8x16_t vrev32q(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16))) +int16x8_t vrev32q_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s16))) +int16x8_t vrev32q_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8))) +int8x16_t vrev32q_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_s8))) +int8x16_t vrev32q_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16))) +uint16x8_t vrev32q_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u16))) +uint16x8_t vrev32q_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8))) +uint8x16_t vrev32q_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_u8))) +uint8x16_t vrev32q_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16))) +int16x8_t vrev64q_m_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s16))) +int16x8_t vrev64q_m(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32))) +int32x4_t vrev64q_m_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s32))) 
+int32x4_t vrev64q_m(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8))) +int8x16_t vrev64q_m_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_s8))) +int8x16_t vrev64q_m(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16))) +uint16x8_t vrev64q_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u16))) +uint16x8_t vrev64q_m(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32))) +uint32x4_t vrev64q_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u32))) +uint32x4_t vrev64q_m(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8))) +uint8x16_t vrev64q_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_u8))) +uint8x16_t vrev64q_m(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16))) +int16x8_t vrev64q_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s16))) +int16x8_t vrev64q(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32))) +int32x4_t vrev64q_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s32))) +int32x4_t vrev64q(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8))) +int8x16_t vrev64q_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_s8))) +int8x16_t vrev64q(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16))) +uint16x8_t vrev64q_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u16))) +uint16x8_t vrev64q(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32))) +uint32x4_t vrev64q_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u32))) +uint32x4_t vrev64q(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8))) +uint8x16_t vrev64q_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_u8))) +uint8x16_t vrev64q(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16))) +int16x8_t vrev64q_x_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s16))) +int16x8_t vrev64q_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32))) +int32x4_t vrev64q_x_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s32))) +int32x4_t vrev64q_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8))) +int8x16_t vrev64q_x_s8(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_s8))) +int8x16_t vrev64q_x(int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16))) +uint16x8_t vrev64q_x_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u16))) +uint16x8_t vrev64q_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32))) +uint32x4_t vrev64q_x_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u32))) +uint32x4_t vrev64q_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8))) +uint8x16_t vrev64q_x_u8(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_u8))) +uint8x16_t vrev64q_x(uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16))) +int16x8_t vrhaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s16))) +int16x8_t vrhaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32))) +int32x4_t vrhaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s32))) +int32x4_t vrhaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8))) +int8x16_t vrhaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_s8))) +int8x16_t vrhaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16))) +uint16x8_t vrhaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u16))) +uint16x8_t vrhaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32))) +uint32x4_t vrhaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u32))) +uint32x4_t vrhaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8))) +uint8x16_t vrhaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_m_u8))) +uint8x16_t vrhaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16))) 
+int16x8_t vrhaddq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s16))) +int16x8_t vrhaddq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32))) +int32x4_t vrhaddq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s32))) +int32x4_t vrhaddq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8))) +int8x16_t vrhaddq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_s8))) +int8x16_t vrhaddq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16))) +uint16x8_t vrhaddq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u16))) +uint16x8_t vrhaddq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32))) +uint32x4_t vrhaddq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u32))) +uint32x4_t vrhaddq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8))) +uint8x16_t vrhaddq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_u8))) +uint8x16_t vrhaddq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16))) +int16x8_t vrhaddq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s16))) +int16x8_t vrhaddq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32))) +int32x4_t vrhaddq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s32))) +int32x4_t vrhaddq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8))) +int8x16_t vrhaddq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_s8))) +int8x16_t vrhaddq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16))) +uint16x8_t vrhaddq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u16))) +uint16x8_t vrhaddq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32))) +uint32x4_t vrhaddq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u32))) +uint32x4_t vrhaddq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8))) +uint8x16_t vrhaddq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrhaddq_x_u8))) +uint8x16_t vrhaddq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32))) +int64_t vrmlaldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_s32))) +int64_t vrmlaldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32))) +uint64_t vrmlaldavhaq_p_u32(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_p_u32))) +uint64_t vrmlaldavhaq_p(uint64_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32))) +int64_t vrmlaldavhaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_s32))) +int64_t vrmlaldavhaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32))) +uint64_t vrmlaldavhaq_u32(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaq_u32))) +uint64_t vrmlaldavhaq(uint64_t, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32))) +int64_t vrmlaldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_p_s32))) +int64_t vrmlaldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32))) +int64_t vrmlaldavhaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhaxq_s32))) +int64_t vrmlaldavhaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32))) +int64_t vrmlaldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_s32))) +int64_t vrmlaldavhq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32))) +uint64_t vrmlaldavhq_p_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_p_u32))) +uint64_t vrmlaldavhq_p(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32))) +int64_t vrmlaldavhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_s32))) +int64_t vrmlaldavhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32))) +uint64_t vrmlaldavhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhq_u32))) +uint64_t vrmlaldavhq(uint32x4_t, uint32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32))) +int64_t vrmlaldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_p_s32))) +int64_t vrmlaldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32))) +int64_t vrmlaldavhxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlaldavhxq_s32))) +int64_t vrmlaldavhxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32))) +int64_t vrmlsldavhaq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_p_s32))) +int64_t vrmlsldavhaq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32))) +int64_t vrmlsldavhaq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaq_s32))) +int64_t vrmlsldavhaq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32))) +int64_t vrmlsldavhaxq_p_s32(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_p_s32))) +int64_t vrmlsldavhaxq_p(int64_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32))) +int64_t vrmlsldavhaxq_s32(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhaxq_s32))) +int64_t vrmlsldavhaxq(int64_t, int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32))) +int64_t vrmlsldavhq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_p_s32))) +int64_t vrmlsldavhq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32))) +int64_t vrmlsldavhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhq_s32))) +int64_t vrmlsldavhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32))) +int64_t vrmlsldavhxq_p_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_p_s32))) +int64_t vrmlsldavhxq_p(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32))) +int64_t vrmlsldavhxq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmlsldavhxq_s32))) +int64_t vrmlsldavhxq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16))) +int16x8_t vrmulhq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s16))) +int16x8_t vrmulhq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32))) +int32x4_t vrmulhq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s32))) +int32x4_t vrmulhq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8))) +int8x16_t vrmulhq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_s8))) +int8x16_t vrmulhq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16))) +uint16x8_t vrmulhq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u16))) +uint16x8_t vrmulhq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32))) +uint32x4_t vrmulhq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u32))) +uint32x4_t vrmulhq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8))) +uint8x16_t vrmulhq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_m_u8))) +uint8x16_t vrmulhq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16))) +int16x8_t vrmulhq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s16))) +int16x8_t vrmulhq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32))) +int32x4_t vrmulhq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s32))) +int32x4_t vrmulhq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8))) +int8x16_t vrmulhq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_s8))) +int8x16_t vrmulhq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16))) +uint16x8_t vrmulhq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u16))) +uint16x8_t vrmulhq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32))) +uint32x4_t vrmulhq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u32))) +uint32x4_t vrmulhq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8))) +uint8x16_t vrmulhq_u8(uint8x16_t, uint8x16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_u8))) +uint8x16_t vrmulhq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16))) +int16x8_t vrmulhq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s16))) +int16x8_t vrmulhq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32))) +int32x4_t vrmulhq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s32))) +int32x4_t vrmulhq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8))) +int8x16_t vrmulhq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_s8))) +int8x16_t vrmulhq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16))) +uint16x8_t vrmulhq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u16))) +uint16x8_t vrmulhq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32))) +uint32x4_t vrmulhq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u32))) +uint32x4_t vrmulhq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8))) +uint8x16_t vrmulhq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrmulhq_x_u8))) +uint8x16_t vrmulhq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16))) +int16x8_t vrshlq_m_n_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s16))) +int16x8_t vrshlq_m_n(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32))) +int32x4_t vrshlq_m_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s32))) +int32x4_t vrshlq_m_n(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8))) +int8x16_t vrshlq_m_n_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_s8))) +int8x16_t vrshlq_m_n(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16))) +uint16x8_t vrshlq_m_n_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u16))) +uint16x8_t vrshlq_m_n(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32))) +uint32x4_t 
vrshlq_m_n_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u32))) +uint32x4_t vrshlq_m_n(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8))) +uint8x16_t vrshlq_m_n_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_n_u8))) +uint8x16_t vrshlq_m_n(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16))) +int16x8_t vrshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s16))) +int16x8_t vrshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32))) +int32x4_t vrshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s32))) +int32x4_t vrshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8))) +int8x16_t vrshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_s8))) +int8x16_t vrshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16))) +uint16x8_t vrshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u16))) +uint16x8_t vrshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32))) +uint32x4_t vrshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u32))) +uint32x4_t vrshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8))) +uint8x16_t vrshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_m_u8))) +uint8x16_t vrshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16))) +int16x8_t vrshlq_n_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s16))) +int16x8_t vrshlq(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32))) +int32x4_t vrshlq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s32))) +int32x4_t vrshlq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8))) +int8x16_t vrshlq_n_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_s8))) +int8x16_t vrshlq(int8x16_t, int32_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16))) +uint16x8_t vrshlq_n_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u16))) +uint16x8_t vrshlq(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32))) +uint32x4_t vrshlq_n_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u32))) +uint32x4_t vrshlq(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8))) +uint8x16_t vrshlq_n_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_n_u8))) +uint8x16_t vrshlq(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16))) +int16x8_t vrshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s16))) +int16x8_t vrshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32))) +int32x4_t vrshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s32))) +int32x4_t vrshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8))) +int8x16_t vrshlq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_s8))) +int8x16_t vrshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16))) +uint16x8_t vrshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u16))) +uint16x8_t vrshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32))) +uint32x4_t vrshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u32))) +uint32x4_t vrshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8))) +uint8x16_t vrshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_u8))) +uint8x16_t vrshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16))) +int16x8_t vrshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s16))) +int16x8_t vrshlq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32))) +int32x4_t vrshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s32))) +int32x4_t vrshlq_x(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8))) +int8x16_t vrshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_s8))) +int8x16_t vrshlq_x(int8x16_t, int8x16_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16))) +uint16x8_t vrshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u16))) +uint16x8_t vrshlq_x(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32))) +uint32x4_t vrshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u32))) +uint32x4_t vrshlq_x(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8))) +uint8x16_t vrshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshlq_x_u8))) +uint8x16_t vrshlq_x(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16))) +int8x16_t vrshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s16))) +int8x16_t vrshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32))) +int16x8_t vrshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_s32))) +int16x8_t vrshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16))) +uint8x16_t vrshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u16))) +uint8x16_t vrshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32))) +uint16x8_t vrshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_m_n_u32))) +uint16x8_t vrshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16))) +int8x16_t vrshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s16))) +int8x16_t vrshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32))) +int16x8_t vrshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_s32))) +int16x8_t vrshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16))) +uint8x16_t vrshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u16))) +uint8x16_t vrshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32))) +uint16x8_t vrshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshrnbq_n_u32))) +uint16x8_t vrshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16))) +int8x16_t vrshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s16))) +int8x16_t vrshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32))) +int16x8_t vrshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_s32))) +int16x8_t vrshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16))) +uint8x16_t vrshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u16))) +uint8x16_t vrshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32))) +uint16x8_t vrshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_m_n_u32))) +uint16x8_t vrshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16))) +int8x16_t vrshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s16))) +int8x16_t vrshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32))) +int16x8_t vrshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_s32))) +int16x8_t vrshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16))) +uint8x16_t vrshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u16))) +uint8x16_t vrshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32))) +uint16x8_t vrshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrntq_n_u32))) +uint16x8_t vrshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16))) +int16x8_t vrshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s16))) +int16x8_t vrshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32))) +int32x4_t vrshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s32))) +int32x4_t vrshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8))) 
+int8x16_t vrshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_s8))) +int8x16_t vrshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16))) +uint16x8_t vrshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u16))) +uint16x8_t vrshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32))) +uint32x4_t vrshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u32))) +uint32x4_t vrshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8))) +uint8x16_t vrshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_m_n_u8))) +uint8x16_t vrshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16))) +int16x8_t vrshrq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s16))) +int16x8_t vrshrq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32))) +int32x4_t vrshrq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s32))) +int32x4_t vrshrq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8))) +int8x16_t vrshrq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_s8))) +int8x16_t vrshrq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16))) +uint16x8_t vrshrq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u16))) +uint16x8_t vrshrq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32))) +uint32x4_t vrshrq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u32))) +uint32x4_t vrshrq(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8))) +uint8x16_t vrshrq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_n_u8))) +uint8x16_t vrshrq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16))) +int16x8_t vrshrq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s16))) +int16x8_t vrshrq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32))) +int32x4_t vrshrq_x_n_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s32))) +int32x4_t vrshrq_x(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8))) +int8x16_t vrshrq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_s8))) +int8x16_t vrshrq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16))) +uint16x8_t vrshrq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u16))) +uint16x8_t vrshrq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32))) +uint32x4_t vrshrq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u32))) +uint32x4_t vrshrq_x(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8))) +uint8x16_t vrshrq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrshrq_x_n_u8))) +uint8x16_t vrshrq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32))) +int32x4_t vsbciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_s32))) +int32x4_t vsbciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32))) +uint32x4_t vsbciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_m_u32))) +uint32x4_t vsbciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32))) +int32x4_t vsbciq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_s32))) +int32x4_t vsbciq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32))) +uint32x4_t vsbciq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbciq_u32))) +uint32x4_t vsbciq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32))) +int32x4_t vsbcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_s32))) +int32x4_t vsbcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32))) +uint32x4_t vsbcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_m_u32))) +uint32x4_t vsbcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32))) +int32x4_t vsbcq_s32(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_s32))) +int32x4_t vsbcq(int32x4_t, int32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32))) +uint32x4_t vsbcq_u32(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsbcq_u32))) +uint32x4_t vsbcq(uint32x4_t, uint32x4_t, unsigned *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16))) +int16x8_t vsetq_lane_s16(int16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s16))) +int16x8_t vsetq_lane(int16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32))) +int32x4_t vsetq_lane_s32(int32_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s32))) +int32x4_t vsetq_lane(int32_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64))) +int64x2_t vsetq_lane_s64(int64_t, int64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s64))) +int64x2_t vsetq_lane(int64_t, int64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8))) +int8x16_t vsetq_lane_s8(int8_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_s8))) +int8x16_t vsetq_lane(int8_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16))) +uint16x8_t vsetq_lane_u16(uint16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u16))) +uint16x8_t vsetq_lane(uint16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32))) +uint32x4_t vsetq_lane_u32(uint32_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u32))) +uint32x4_t vsetq_lane(uint32_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64))) +uint64x2_t vsetq_lane_u64(uint64_t, uint64x2_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u64))) +uint64x2_t vsetq_lane(uint64_t, uint64x2_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8))) +uint8x16_t vsetq_lane_u8(uint8_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_u8))) +uint8x16_t vsetq_lane(uint8_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16))) +int16x8_t vshlcq_m_s16(int16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s16))) +int16x8_t vshlcq_m(int16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32))) +int32x4_t 
vshlcq_m_s32(int32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s32))) +int32x4_t vshlcq_m(int32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8))) +int8x16_t vshlcq_m_s8(int8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_s8))) +int8x16_t vshlcq_m(int8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16))) +uint16x8_t vshlcq_m_u16(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u16))) +uint16x8_t vshlcq_m(uint16x8_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32))) +uint32x4_t vshlcq_m_u32(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u32))) +uint32x4_t vshlcq_m(uint32x4_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8))) +uint8x16_t vshlcq_m_u8(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_m_u8))) +uint8x16_t vshlcq_m(uint8x16_t, uint32_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16))) +int16x8_t vshlcq_s16(int16x8_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s16))) +int16x8_t vshlcq(int16x8_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32))) +int32x4_t vshlcq_s32(int32x4_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s32))) +int32x4_t vshlcq(int32x4_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8))) +int8x16_t vshlcq_s8(int8x16_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_s8))) +int8x16_t vshlcq(int8x16_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16))) +uint16x8_t vshlcq_u16(uint16x8_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u16))) +uint16x8_t vshlcq(uint16x8_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32))) +uint32x4_t vshlcq_u32(uint32x4_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u32))) +uint32x4_t vshlcq(uint32x4_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8))) +uint8x16_t vshlcq_u8(uint8x16_t, uint32_t *, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlcq_u8))) +uint8x16_t vshlcq(uint8x16_t, uint32_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16))) +int32x4_t vshllbq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s16))) +int32x4_t vshllbq_m(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8))) +int16x8_t vshllbq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_s8))) +int16x8_t vshllbq_m(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16))) +uint32x4_t vshllbq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u16))) +uint32x4_t vshllbq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8))) +uint16x8_t vshllbq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_m_n_u8))) +uint16x8_t vshllbq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16))) +int32x4_t vshllbq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s16))) +int32x4_t vshllbq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8))) +int16x8_t vshllbq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_s8))) +int16x8_t vshllbq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16))) +uint32x4_t vshllbq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u16))) +uint32x4_t vshllbq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8))) +uint16x8_t vshllbq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_n_u8))) +uint16x8_t vshllbq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16))) +int32x4_t vshllbq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s16))) +int32x4_t vshllbq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8))) +int16x8_t vshllbq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_s8))) +int16x8_t vshllbq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16))) +uint32x4_t vshllbq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u16))) +uint32x4_t vshllbq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8))) +uint16x8_t vshllbq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshllbq_x_n_u8))) +uint16x8_t vshllbq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16))) +int32x4_t vshlltq_m_n_s16(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s16))) +int32x4_t vshlltq_m(int32x4_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8))) +int16x8_t vshlltq_m_n_s8(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_s8))) +int16x8_t vshlltq_m(int16x8_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16))) +uint32x4_t vshlltq_m_n_u16(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u16))) +uint32x4_t vshlltq_m(uint32x4_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8))) +uint16x8_t vshlltq_m_n_u8(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_m_n_u8))) +uint16x8_t vshlltq_m(uint16x8_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16))) +int32x4_t vshlltq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s16))) +int32x4_t vshlltq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8))) +int16x8_t vshlltq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_s8))) +int16x8_t vshlltq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16))) +uint32x4_t vshlltq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u16))) +uint32x4_t vshlltq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8))) +uint16x8_t vshlltq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_n_u8))) +uint16x8_t vshlltq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16))) +int32x4_t vshlltq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s16))) +int32x4_t vshlltq_x(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8))) +int16x8_t vshlltq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_s8))) +int16x8_t vshlltq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16))) +uint32x4_t vshlltq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u16))) +uint32x4_t vshlltq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8))) +uint16x8_t vshlltq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlltq_x_n_u8))) +uint16x8_t vshlltq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16))) +int16x8_t vshlq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s16))) +int16x8_t vshlq_m_n(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32))) +int32x4_t vshlq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s32))) +int32x4_t vshlq_m_n(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8))) +int8x16_t vshlq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_s8))) +int8x16_t vshlq_m_n(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16))) +uint16x8_t vshlq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u16))) +uint16x8_t vshlq_m_n(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32))) +uint32x4_t vshlq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u32))) +uint32x4_t vshlq_m_n(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8))) +uint8x16_t vshlq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_n_u8))) +uint8x16_t vshlq_m_n(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16))) +int16x8_t vshlq_m_r_s16(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s16))) +int16x8_t vshlq_m_r(int16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32))) +int32x4_t vshlq_m_r_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s32))) +int32x4_t vshlq_m_r(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8))) +int8x16_t vshlq_m_r_s8(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_s8))) +int8x16_t vshlq_m_r(int8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16))) 
+uint16x8_t vshlq_m_r_u16(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u16))) +uint16x8_t vshlq_m_r(uint16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32))) +uint32x4_t vshlq_m_r_u32(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u32))) +uint32x4_t vshlq_m_r(uint32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8))) +uint8x16_t vshlq_m_r_u8(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_r_u8))) +uint8x16_t vshlq_m_r(uint8x16_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16))) +int16x8_t vshlq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s16))) +int16x8_t vshlq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32))) +int32x4_t vshlq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s32))) +int32x4_t vshlq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8))) +int8x16_t vshlq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_s8))) +int8x16_t vshlq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16))) +uint16x8_t vshlq_m_u16(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u16))) +uint16x8_t vshlq_m(uint16x8_t, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32))) +uint32x4_t vshlq_m_u32(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u32))) +uint32x4_t vshlq_m(uint32x4_t, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8))) +uint8x16_t vshlq_m_u8(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_m_u8))) +uint8x16_t vshlq_m(uint8x16_t, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16))) +int16x8_t vshlq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s16))) +int16x8_t vshlq_n(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32))) +int32x4_t vshlq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s32))) +int32x4_t vshlq_n(int32x4_t, int); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8))) +int8x16_t vshlq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_s8))) +int8x16_t vshlq_n(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16))) +uint16x8_t vshlq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u16))) +uint16x8_t vshlq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32))) +uint32x4_t vshlq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u32))) +uint32x4_t vshlq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8))) +uint8x16_t vshlq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_n_u8))) +uint8x16_t vshlq_n(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16))) +int16x8_t vshlq_r_s16(int16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s16))) +int16x8_t vshlq_r(int16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32))) +int32x4_t vshlq_r_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s32))) +int32x4_t vshlq_r(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8))) +int8x16_t vshlq_r_s8(int8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_s8))) +int8x16_t vshlq_r(int8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16))) +uint16x8_t vshlq_r_u16(uint16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u16))) +uint16x8_t vshlq_r(uint16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32))) +uint32x4_t vshlq_r_u32(uint32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u32))) +uint32x4_t vshlq_r(uint32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8))) +uint8x16_t vshlq_r_u8(uint8x16_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_r_u8))) +uint8x16_t vshlq_r(uint8x16_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16))) +int16x8_t vshlq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s16))) +int16x8_t vshlq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32))) +int32x4_t vshlq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s32))) +int32x4_t vshlq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8))) +int8x16_t vshlq_s8(int8x16_t, int8x16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_s8))) +int8x16_t vshlq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16))) +uint16x8_t vshlq_u16(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u16))) +uint16x8_t vshlq(uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32))) +uint32x4_t vshlq_u32(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u32))) +uint32x4_t vshlq(uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8))) +uint8x16_t vshlq_u8(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_u8))) +uint8x16_t vshlq(uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16))) +int16x8_t vshlq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s16))) +int16x8_t vshlq_x_n(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32))) +int32x4_t vshlq_x_n_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s32))) +int32x4_t vshlq_x_n(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8))) +int8x16_t vshlq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_s8))) +int8x16_t vshlq_x_n(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16))) +uint16x8_t vshlq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u16))) +uint16x8_t vshlq_x_n(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32))) +uint32x4_t vshlq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u32))) +uint32x4_t vshlq_x_n(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8))) +uint8x16_t vshlq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_n_u8))) +uint8x16_t vshlq_x_n(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16))) +int16x8_t vshlq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s16))) +int16x8_t vshlq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32))) +int32x4_t vshlq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s32))) +int32x4_t vshlq_x(int32x4_t, int32x4_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8))) +int8x16_t vshlq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_s8))) +int8x16_t vshlq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16))) +uint16x8_t vshlq_x_u16(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u16))) +uint16x8_t vshlq_x(uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32))) +uint32x4_t vshlq_x_u32(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u32))) +uint32x4_t vshlq_x(uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8))) +uint8x16_t vshlq_x_u8(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshlq_x_u8))) +uint8x16_t vshlq_x(uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16))) +int8x16_t vshrnbq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s16))) +int8x16_t vshrnbq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32))) +int16x8_t vshrnbq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_s32))) +int16x8_t vshrnbq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16))) +uint8x16_t vshrnbq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u16))) +uint8x16_t vshrnbq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32))) +uint16x8_t vshrnbq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_m_n_u32))) +uint16x8_t vshrnbq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16))) +int8x16_t vshrnbq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s16))) +int8x16_t vshrnbq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32))) +int16x8_t vshrnbq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_s32))) +int16x8_t vshrnbq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16))) +uint8x16_t vshrnbq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u16))) 
+uint8x16_t vshrnbq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32))) +uint16x8_t vshrnbq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrnbq_n_u32))) +uint16x8_t vshrnbq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16))) +int8x16_t vshrntq_m_n_s16(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s16))) +int8x16_t vshrntq_m(int8x16_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32))) +int16x8_t vshrntq_m_n_s32(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_s32))) +int16x8_t vshrntq_m(int16x8_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16))) +uint8x16_t vshrntq_m_n_u16(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u16))) +uint8x16_t vshrntq_m(uint8x16_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32))) +uint16x8_t vshrntq_m_n_u32(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_m_n_u32))) +uint16x8_t vshrntq_m(uint16x8_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16))) +int8x16_t vshrntq_n_s16(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s16))) +int8x16_t vshrntq(int8x16_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32))) +int16x8_t vshrntq_n_s32(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_s32))) +int16x8_t vshrntq(int16x8_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16))) +uint8x16_t vshrntq_n_u16(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u16))) +uint8x16_t vshrntq(uint8x16_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32))) +uint16x8_t vshrntq_n_u32(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrntq_n_u32))) +uint16x8_t vshrntq(uint16x8_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16))) +int16x8_t vshrq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s16))) +int16x8_t vshrq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32))) +int32x4_t vshrq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s32))) +int32x4_t vshrq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8))) +int8x16_t vshrq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_s8))) +int8x16_t vshrq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16))) +uint16x8_t vshrq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u16))) +uint16x8_t vshrq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32))) +uint32x4_t vshrq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u32))) +uint32x4_t vshrq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8))) +uint8x16_t vshrq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_m_n_u8))) +uint8x16_t vshrq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16))) +int16x8_t vshrq_n_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s16))) +int16x8_t vshrq(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32))) +int32x4_t vshrq_n_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s32))) +int32x4_t vshrq(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8))) +int8x16_t vshrq_n_s8(int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_s8))) +int8x16_t vshrq(int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16))) +uint16x8_t vshrq_n_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u16))) +uint16x8_t vshrq(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32))) +uint32x4_t vshrq_n_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u32))) +uint32x4_t vshrq(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8))) +uint8x16_t vshrq_n_u8(uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_n_u8))) +uint8x16_t vshrq(uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16))) +int16x8_t vshrq_x_n_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s16))) +int16x8_t vshrq_x(int16x8_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32))) +int32x4_t vshrq_x_n_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s32))) +int32x4_t vshrq_x(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8))) +int8x16_t vshrq_x_n_s8(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_s8))) +int8x16_t vshrq_x(int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16))) +uint16x8_t vshrq_x_n_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u16))) +uint16x8_t vshrq_x(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32))) +uint32x4_t vshrq_x_n_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u32))) +uint32x4_t vshrq_x(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8))) +uint8x16_t vshrq_x_n_u8(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vshrq_x_n_u8))) +uint8x16_t vshrq_x(uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16))) +int16x8_t vsliq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s16))) +int16x8_t vsliq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32))) +int32x4_t vsliq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s32))) +int32x4_t vsliq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8))) +int8x16_t vsliq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_s8))) +int8x16_t vsliq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16))) +uint16x8_t vsliq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u16))) +uint16x8_t vsliq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32))) +uint32x4_t vsliq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u32))) +uint32x4_t vsliq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8))) +uint8x16_t vsliq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_m_n_u8))) +uint8x16_t vsliq_m(uint8x16_t, 
uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16))) +int16x8_t vsliq_n_s16(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s16))) +int16x8_t vsliq(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32))) +int32x4_t vsliq_n_s32(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s32))) +int32x4_t vsliq(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8))) +int8x16_t vsliq_n_s8(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_s8))) +int8x16_t vsliq(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16))) +uint16x8_t vsliq_n_u16(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u16))) +uint16x8_t vsliq(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32))) +uint32x4_t vsliq_n_u32(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u32))) +uint32x4_t vsliq(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8))) +uint8x16_t vsliq_n_u8(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsliq_n_u8))) +uint8x16_t vsliq(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16))) +int16x8_t vsriq_m_n_s16(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s16))) +int16x8_t vsriq_m(int16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32))) +int32x4_t vsriq_m_n_s32(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s32))) +int32x4_t vsriq_m(int32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8))) +int8x16_t vsriq_m_n_s8(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_s8))) +int8x16_t vsriq_m(int8x16_t, int8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16))) +uint16x8_t vsriq_m_n_u16(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u16))) +uint16x8_t vsriq_m(uint16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32))) +uint32x4_t vsriq_m_n_u32(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u32))) +uint32x4_t vsriq_m(uint32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8))) +uint8x16_t vsriq_m_n_u8(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_m_n_u8))) +uint8x16_t vsriq_m(uint8x16_t, uint8x16_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16))) +int16x8_t vsriq_n_s16(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s16))) +int16x8_t vsriq(int16x8_t, int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32))) +int32x4_t vsriq_n_s32(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s32))) +int32x4_t vsriq(int32x4_t, int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8))) +int8x16_t vsriq_n_s8(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_s8))) +int8x16_t vsriq(int8x16_t, int8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16))) +uint16x8_t vsriq_n_u16(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u16))) +uint16x8_t vsriq(uint16x8_t, uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32))) +uint32x4_t vsriq_n_u32(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u32))) +uint32x4_t vsriq(uint32x4_t, uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8))) +uint8x16_t vsriq_n_u8(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsriq_n_u8))) +uint8x16_t vsriq(uint8x16_t, uint8x16_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16))) +void vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s16))) +void vst1q_p(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32))) +void vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s32))) +void vst1q_p(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8))) +void vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_s8))) +void vst1q_p(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16))) +void vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u16))) +void vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32))) +void vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u32))) +void vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8))) +void vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_u8))) +void vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16))) +void vst1q_s16(int16_t *, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s16))) +void vst1q(int16_t *, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32))) +void vst1q_s32(int32_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s32))) +void vst1q(int32_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8))) +void vst1q_s8(int8_t *, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_s8))) +void vst1q(int8_t *, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16))) +void vst1q_u16(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u16))) +void vst1q(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32))) +void vst1q_u32(uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u32))) +void vst1q(uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8))) +void vst1q_u8(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_u8))) +void vst1q(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16))) +void vst2q_s16(int16_t *, int16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s16))) +void vst2q(int16_t *, int16x8x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32))) +void vst2q_s32(int32_t *, int32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s32))) +void vst2q(int32_t *, int32x4x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8))) +void vst2q_s8(int8_t *, int8x16x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_s8))) +void vst2q(int8_t *, int8x16x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16))) +void vst2q_u16(uint16_t *, uint16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u16))) +void vst2q(uint16_t *, uint16x8x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32))) +void vst2q_u32(uint32_t *, uint32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u32))) +void vst2q(uint32_t *, uint32x4x2_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8))) +void vst2q_u8(uint8_t *, uint8x16x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_u8))) +void vst2q(uint8_t *, uint8x16x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16))) +void vst4q_s16(int16_t *, int16x8x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s16))) +void vst4q(int16_t *, int16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32))) +void vst4q_s32(int32_t *, int32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s32))) +void vst4q(int32_t *, int32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8))) +void vst4q_s8(int8_t *, int8x16x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_s8))) +void vst4q(int8_t *, int8x16x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16))) +void vst4q_u16(uint16_t *, uint16x8x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u16))) +void vst4q(uint16_t *, uint16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32))) +void vst4q_u32(uint32_t *, uint32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u32))) +void vst4q(uint32_t *, uint32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8))) +void vst4q_u8(uint8_t *, uint8x16x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_u8))) +void vst4q(uint8_t *, uint8x16x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16))) +void vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s16))) +void vstrbq_p(int8_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32))) +void vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s32))) +void vstrbq_p(int8_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8))) +void vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_s8))) +void vstrbq_p(int8_t *, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16))) +void vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u16))) +void vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32))) +void vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u32))) +void vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8))) +void vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_p_u8))) +void vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16))) +void vstrbq_s16(int8_t *, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s16))) +void vstrbq(int8_t *, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32))) +void vstrbq_s32(int8_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s32))) +void vstrbq(int8_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8))) +void vstrbq_s8(int8_t *, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_s8))) +void vstrbq(int8_t *, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16))) +void vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16))) +void vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32))) +void vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32))) +void vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8))) +void vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8))) +void vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16))) +void vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16))) +void vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32))) +void vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32))) +void vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8))) +void vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8))) +void vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16))) +void vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16))) +void vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32))) +void vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32))) +void vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8))) +void vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8))) +void vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16))) +void vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16))) +void vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32))) +void vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32))) +void vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8))) +void vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8))) +void vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16))) +void vstrbq_u16(uint8_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u16))) +void vstrbq(uint8_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32))) +void vstrbq_u32(uint8_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u32))) +void vstrbq(uint8_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8))) +void vstrbq_u8(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrbq_u8))) +void vstrbq(uint8_t *, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64))) +void vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64))) +void vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64))) +void vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64))) +void vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64))) +void vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_s64))) +void vstrdq_scatter_base(uint64x2_t, int, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64))) +void vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_u64))) +void vstrdq_scatter_base(uint64x2_t, int, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64))) +void vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64))) +void vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64))) +void vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64))) +void vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64))) +void vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64))) +void vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64))) +void vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64))) +void vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64))) +void vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64))) +void vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64))) +void vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64))) +void vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64))) +void vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64))) +void vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64))) +void vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64))) +void vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64))) +void vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64))) +void vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64))) +void vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64))) +void vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64))) +void vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64))) +void vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64))) +void vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64))) +void vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16))) +void vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s16))) +void vstrhq_p(int16_t *, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32))) +void vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_s32))) +void vstrhq_p(int16_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16))) +void vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u16))) +void vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32))) +void vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_u32))) +void vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16))) +void vstrhq_s16(int16_t *, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s16))) +void vstrhq(int16_t *, 
int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32))) +void vstrhq_s32(int16_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_s32))) +void vstrhq(int16_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16))) +void vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16))) +void vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32))) +void vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32))) +void vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16))) +void vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16))) +void vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32))) +void vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32))) +void vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16))) +void vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16))) +void vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32))) +void vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32))) +void vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16))) +void vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16))) +void vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32))) +void vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32))) +void vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16))) +void vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); 
+static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16))) +void vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32))) +void vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32))) +void vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16))) +void vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16))) +void vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32))) +void vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32))) +void vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16))) +void vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16))) +void vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32))) +void vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32))) +void vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16))) +void vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16))) +void vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32))) +void vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32))) +void vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16))) +void vstrhq_u16(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u16))) +void vstrhq(uint16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32))) +void vstrhq_u32(uint16_t *, uint32x4_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_u32))) +void vstrhq(uint16_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32))) +void vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_s32))) +void vstrwq_p(int32_t *, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32))) +void vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_u32))) +void vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32))) +void vstrwq_s32(int32_t *, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_s32))) +void vstrwq(int32_t *, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32))) +void vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32))) +void vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32))) +void vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32))) +void vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32))) +void vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_s32))) +void vstrwq_scatter_base(uint32x4_t, int, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32))) +void vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_u32))) +void vstrwq_scatter_base(uint32x4_t, int, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32))) +void vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32))) +void vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32))) +void vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32))) +void vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32))) +void vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32))) +void vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32))) +void vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32))) +void vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32))) +void vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32))) +void vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32))) +void vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32))) +void vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32))) +void vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32))) +void vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32))) +void vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32))) +void vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32))) +void vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32))) +void vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32))) +void vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32))) +void vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32))) +void vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32))) +void vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32))) +void vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32))) +void vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32))) +void vstrwq_u32(uint32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_u32))) +void vstrwq(uint32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16))) +int16x8_t vsubq_m_n_s16(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s16))) +int16x8_t vsubq_m(int16x8_t, int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32))) +int32x4_t vsubq_m_n_s32(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s32))) +int32x4_t vsubq_m(int32x4_t, int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8))) +int8x16_t vsubq_m_n_s8(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_s8))) +int8x16_t vsubq_m(int8x16_t, int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16))) +uint16x8_t vsubq_m_n_u16(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u16))) +uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32))) +uint32x4_t vsubq_m_n_u32(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u32))) +uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8))) +uint8x16_t vsubq_m_n_u8(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_u8))) +uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16))) +int16x8_t vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s16))) +int16x8_t vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32))) +int32x4_t vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s32))) +int32x4_t vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8))) +int8x16_t vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_s8))) +int8x16_t vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16))) +uint16x8_t vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u16))) +uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32))) +uint32x4_t vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u32))) +uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8))) +uint8x16_t vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_u8))) +uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16))) +int16x8_t vsubq_n_s16(int16x8_t, int16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s16))) +int16x8_t vsubq(int16x8_t, int16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32))) +int32x4_t vsubq_n_s32(int32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s32))) +int32x4_t vsubq(int32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8))) +int8x16_t vsubq_n_s8(int8x16_t, int8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_s8))) +int8x16_t vsubq(int8x16_t, int8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16))) +uint16x8_t vsubq_n_u16(uint16x8_t, uint16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u16))) +uint16x8_t vsubq(uint16x8_t, uint16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32))) +uint32x4_t vsubq_n_u32(uint32x4_t, uint32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u32))) +uint32x4_t vsubq(uint32x4_t, uint32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8))) +uint8x16_t vsubq_n_u8(uint8x16_t, uint8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_u8))) +uint8x16_t vsubq(uint8x16_t, uint8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16))) +int16x8_t vsubq_s16(int16x8_t, int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s16))) +int16x8_t vsubq(int16x8_t, int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32))) +int32x4_t vsubq_s32(int32x4_t, int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s32))) +int32x4_t vsubq(int32x4_t, int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8))) +int8x16_t vsubq_s8(int8x16_t, int8x16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_s8))) +int8x16_t vsubq(int8x16_t, int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16))) +uint16x8_t vsubq_u16(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u16))) +uint16x8_t vsubq(uint16x8_t, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32))) +uint32x4_t vsubq_u32(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u32))) +uint32x4_t vsubq(uint32x4_t, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8))) +uint8x16_t vsubq_u8(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_u8))) +uint8x16_t vsubq(uint8x16_t, uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16))) +int16x8_t vsubq_x_n_s16(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s16))) +int16x8_t vsubq_x(int16x8_t, int16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32))) +int32x4_t vsubq_x_n_s32(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s32))) +int32x4_t vsubq_x(int32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8))) +int8x16_t vsubq_x_n_s8(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_s8))) +int8x16_t vsubq_x(int8x16_t, int8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16))) +uint16x8_t vsubq_x_n_u16(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u16))) +uint16x8_t vsubq_x(uint16x8_t, uint16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32))) +uint32x4_t vsubq_x_n_u32(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u32))) +uint32x4_t vsubq_x(uint32x4_t, uint32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8))) +uint8x16_t vsubq_x_n_u8(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_u8))) +uint8x16_t vsubq_x(uint8x16_t, uint8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16))) +int16x8_t vsubq_x_s16(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s16))) +int16x8_t vsubq_x(int16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32))) +int32x4_t vsubq_x_s32(int32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s32))) +int32x4_t vsubq_x(int32x4_t, int32x4_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8))) +int8x16_t vsubq_x_s8(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_s8))) +int8x16_t vsubq_x(int8x16_t, int8x16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16))) +uint16x8_t vsubq_x_u16(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u16))) +uint16x8_t vsubq_x(uint16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32))) +uint32x4_t vsubq_x_u32(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u32))) +uint32x4_t vsubq_x(uint32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8))) +uint8x16_t vsubq_x_u8(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_u8))) +uint8x16_t vsubq_x(uint8x16_t, uint8x16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16))) +int16x8_t vuninitializedq(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32))) +int32x4_t vuninitializedq(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64))) +int64x2_t vuninitializedq(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8))) +int8x16_t vuninitializedq(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16))) +uint16x8_t vuninitializedq(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32))) +uint32x4_t vuninitializedq(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64))) +uint64x2_t vuninitializedq(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8))) +uint8x16_t vuninitializedq(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s16))) +int16x8_t vuninitializedq_s16(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s32))) +int32x4_t vuninitializedq_s32(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s64))) +int64x2_t vuninitializedq_s64(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_s8))) +int8x16_t vuninitializedq_s8(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u16))) +uint16x8_t vuninitializedq_u16(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u32))) +uint32x4_t vuninitializedq_u32(); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u64))) +uint64x2_t vuninitializedq_u64(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_u8))) +uint8x16_t vuninitializedq_u8(); + +#endif /* (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */ + +#if (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) + +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16))) +float16x8_t vabdq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f16))) +float16x8_t vabdq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32))) +float32x4_t vabdq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_f32))) +float32x4_t vabdq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16))) +float16x8_t vabdq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f16))) +float16x8_t vabdq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32))) +float32x4_t vabdq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_m_f32))) +float32x4_t vabdq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16))) +float16x8_t vabdq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f16))) +float16x8_t vabdq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32))) +float32x4_t vabdq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabdq_x_f32))) +float32x4_t vabdq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16))) +float16x8_t vabsq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f16))) +float16x8_t vabsq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32))) +float32x4_t vabsq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_f32))) +float32x4_t vabsq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16))) +float16x8_t vabsq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f16))) +float16x8_t vabsq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32))) +float32x4_t vabsq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_m_f32))) +float32x4_t vabsq_m(float32x4_t, float32x4_t, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16))) +float16x8_t vabsq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f16))) +float16x8_t vabsq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32))) +float32x4_t vabsq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vabsq_x_f32))) +float32x4_t vabsq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16))) +float16x8_t vaddq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f16))) +float16x8_t vaddq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32))) +float32x4_t vaddq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_f32))) +float32x4_t vaddq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16))) +float16x8_t vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f16))) +float16x8_t vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32))) +float32x4_t vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_f32))) +float32x4_t vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16))) +float16x8_t vaddq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f16))) +float16x8_t vaddq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32))) +float32x4_t vaddq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_m_n_f32))) +float32x4_t vaddq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16))) +float16x8_t vaddq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f16))) +float16x8_t vaddq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32))) +float32x4_t vaddq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_n_f32))) +float32x4_t vaddq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16))) +float16x8_t vaddq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f16))) +float16x8_t vaddq_x(float16x8_t, 
float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32))) +float32x4_t vaddq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_f32))) +float32x4_t vaddq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16))) +float16x8_t vaddq_x_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f16))) +float16x8_t vaddq_x(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32))) +float32x4_t vaddq_x_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vaddq_x_n_f32))) +float32x4_t vaddq_x(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16))) +float16x8_t vandq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f16))) +float16x8_t vandq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32))) +float32x4_t vandq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_f32))) +float32x4_t vandq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16))) +float16x8_t vandq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f16))) +float16x8_t vandq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32))) +float32x4_t vandq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_m_f32))) +float32x4_t vandq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16))) +float16x8_t vandq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f16))) +float16x8_t vandq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32))) +float32x4_t vandq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vandq_x_f32))) +float32x4_t vandq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16))) +float16x8_t vbicq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f16))) +float16x8_t vbicq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32))) +float32x4_t vbicq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_f32))) 
+float32x4_t vbicq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16))) +float16x8_t vbicq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f16))) +float16x8_t vbicq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32))) +float32x4_t vbicq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_m_f32))) +float32x4_t vbicq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16))) +float16x8_t vbicq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f16))) +float16x8_t vbicq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32))) +float32x4_t vbicq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbicq_x_f32))) +float32x4_t vbicq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16))) +float16x8_t vbrsrq_m_n_f16(float16x8_t, float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f16))) +float16x8_t vbrsrq_m(float16x8_t, float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32))) +float32x4_t vbrsrq_m_n_f32(float32x4_t, float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_m_n_f32))) +float32x4_t vbrsrq_m(float32x4_t, float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16))) +float16x8_t vbrsrq_n_f16(float16x8_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f16))) +float16x8_t vbrsrq(float16x8_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32))) +float32x4_t vbrsrq_n_f32(float32x4_t, int32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_n_f32))) +float32x4_t vbrsrq(float32x4_t, int32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16))) +float16x8_t vbrsrq_x_n_f16(float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f16))) +float16x8_t vbrsrq_x(float16x8_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32))) +float32x4_t vbrsrq_x_n_f32(float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vbrsrq_x_n_f32))) +float32x4_t vbrsrq_x(float32x4_t, int32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16))) +float16x8_t vcaddq_rot270_f16(float16x8_t, 
float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f16))) +float16x8_t vcaddq_rot270(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32))) +float32x4_t vcaddq_rot270_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_f32))) +float32x4_t vcaddq_rot270(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16))) +float16x8_t vcaddq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f16))) +float16x8_t vcaddq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32))) +float32x4_t vcaddq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_m_f32))) +float32x4_t vcaddq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16))) +float16x8_t vcaddq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f16))) +float16x8_t vcaddq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32))) +float32x4_t vcaddq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot270_x_f32))) +float32x4_t vcaddq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16))) +float16x8_t vcaddq_rot90_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f16))) +float16x8_t vcaddq_rot90(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32))) +float32x4_t vcaddq_rot90_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_f32))) +float32x4_t vcaddq_rot90(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16))) +float16x8_t vcaddq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f16))) +float16x8_t vcaddq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32))) +float32x4_t vcaddq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_m_f32))) +float32x4_t vcaddq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16))) +float16x8_t 
vcaddq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f16))) +float16x8_t vcaddq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32))) +float32x4_t vcaddq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcaddq_rot90_x_f32))) +float32x4_t vcaddq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16))) +float16x8_t vcmlaq_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f16))) +float16x8_t vcmlaq(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32))) +float32x4_t vcmlaq_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_f32))) +float32x4_t vcmlaq(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16))) +float16x8_t vcmlaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f16))) +float16x8_t vcmlaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32))) +float32x4_t vcmlaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_m_f32))) +float32x4_t vcmlaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16))) +float16x8_t vcmlaq_rot180_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f16))) +float16x8_t vcmlaq_rot180(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32))) +float32x4_t vcmlaq_rot180_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_f32))) +float32x4_t vcmlaq_rot180(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16))) +float16x8_t vcmlaq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f16))) +float16x8_t vcmlaq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32))) +float32x4_t vcmlaq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot180_m_f32))) +float32x4_t vcmlaq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16))) +float16x8_t vcmlaq_rot270_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f16))) +float16x8_t vcmlaq_rot270(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32))) +float32x4_t vcmlaq_rot270_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_f32))) +float32x4_t vcmlaq_rot270(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16))) +float16x8_t vcmlaq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f16))) +float16x8_t vcmlaq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32))) +float32x4_t vcmlaq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot270_m_f32))) +float32x4_t vcmlaq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16))) +float16x8_t vcmlaq_rot90_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f16))) +float16x8_t vcmlaq_rot90(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32))) +float32x4_t vcmlaq_rot90_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_f32))) +float32x4_t vcmlaq_rot90(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16))) +float16x8_t vcmlaq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f16))) +float16x8_t vcmlaq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32))) +float32x4_t vcmlaq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmlaq_rot90_m_f32))) +float32x4_t vcmlaq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16))) +mve_pred16_t vcmpeqq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f16))) +mve_pred16_t vcmpeqq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32))) +mve_pred16_t vcmpeqq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_f32))) +mve_pred16_t vcmpeqq(float32x4_t, float32x4_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16))) +mve_pred16_t vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f16))) +mve_pred16_t vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32))) +mve_pred16_t vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_f32))) +mve_pred16_t vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16))) +mve_pred16_t vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f16))) +mve_pred16_t vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32))) +mve_pred16_t vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_m_n_f32))) +mve_pred16_t vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16))) +mve_pred16_t vcmpeqq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f16))) +mve_pred16_t vcmpeqq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32))) +mve_pred16_t vcmpeqq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpeqq_n_f32))) +mve_pred16_t vcmpeqq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16))) +mve_pred16_t vcmpgeq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f16))) +mve_pred16_t vcmpgeq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32))) +mve_pred16_t vcmpgeq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_f32))) +mve_pred16_t vcmpgeq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16))) +mve_pred16_t vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f16))) +mve_pred16_t vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32))) +mve_pred16_t vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_f32))) +mve_pred16_t vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16))) +mve_pred16_t vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f16))) +mve_pred16_t vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32))) +mve_pred16_t vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_m_n_f32))) +mve_pred16_t vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16))) +mve_pred16_t vcmpgeq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f16))) +mve_pred16_t vcmpgeq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32))) +mve_pred16_t vcmpgeq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgeq_n_f32))) +mve_pred16_t vcmpgeq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16))) +mve_pred16_t vcmpgtq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f16))) +mve_pred16_t vcmpgtq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32))) +mve_pred16_t vcmpgtq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_f32))) +mve_pred16_t vcmpgtq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16))) +mve_pred16_t vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f16))) +mve_pred16_t vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32))) +mve_pred16_t vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_f32))) +mve_pred16_t vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16))) +mve_pred16_t vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f16))) +mve_pred16_t vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32))) +mve_pred16_t vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_m_n_f32))) +mve_pred16_t vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16))) +mve_pred16_t vcmpgtq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f16))) +mve_pred16_t vcmpgtq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32))) +mve_pred16_t vcmpgtq_n_f32(float32x4_t, float32_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpgtq_n_f32))) +mve_pred16_t vcmpgtq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16))) +mve_pred16_t vcmpleq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f16))) +mve_pred16_t vcmpleq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32))) +mve_pred16_t vcmpleq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_f32))) +mve_pred16_t vcmpleq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16))) +mve_pred16_t vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f16))) +mve_pred16_t vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32))) +mve_pred16_t vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_f32))) +mve_pred16_t vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16))) +mve_pred16_t vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f16))) +mve_pred16_t vcmpleq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32))) +mve_pred16_t vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_m_n_f32))) +mve_pred16_t vcmpleq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16))) +mve_pred16_t vcmpleq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f16))) +mve_pred16_t vcmpleq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32))) +mve_pred16_t vcmpleq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpleq_n_f32))) +mve_pred16_t vcmpleq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16))) +mve_pred16_t vcmpltq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f16))) +mve_pred16_t vcmpltq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32))) +mve_pred16_t vcmpltq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_f32))) +mve_pred16_t vcmpltq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16))) +mve_pred16_t vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f16))) +mve_pred16_t vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32))) +mve_pred16_t vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_f32))) +mve_pred16_t vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16))) +mve_pred16_t vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f16))) +mve_pred16_t vcmpltq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32))) +mve_pred16_t vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_m_n_f32))) +mve_pred16_t vcmpltq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16))) +mve_pred16_t vcmpltq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f16))) +mve_pred16_t vcmpltq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32))) +mve_pred16_t vcmpltq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpltq_n_f32))) +mve_pred16_t vcmpltq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16))) +mve_pred16_t vcmpneq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f16))) +mve_pred16_t vcmpneq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32))) +mve_pred16_t vcmpneq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_f32))) +mve_pred16_t vcmpneq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16))) +mve_pred16_t vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f16))) +mve_pred16_t vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32))) +mve_pred16_t vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_f32))) +mve_pred16_t vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16))) +mve_pred16_t vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f16))) +mve_pred16_t vcmpneq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32))) 
+mve_pred16_t vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_m_n_f32))) +mve_pred16_t vcmpneq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16))) +mve_pred16_t vcmpneq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f16))) +mve_pred16_t vcmpneq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32))) +mve_pred16_t vcmpneq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmpneq_n_f32))) +mve_pred16_t vcmpneq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16))) +float16x8_t vcmulq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f16))) +float16x8_t vcmulq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32))) +float32x4_t vcmulq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_f32))) +float32x4_t vcmulq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16))) +float16x8_t vcmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f16))) +float16x8_t vcmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32))) +float32x4_t vcmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_m_f32))) +float32x4_t vcmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16))) +float16x8_t vcmulq_rot180_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f16))) +float16x8_t vcmulq_rot180(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32))) +float32x4_t vcmulq_rot180_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_f32))) +float32x4_t vcmulq_rot180(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16))) +float16x8_t vcmulq_rot180_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f16))) +float16x8_t vcmulq_rot180_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32))) +float32x4_t vcmulq_rot180_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_m_f32))) +float32x4_t 
vcmulq_rot180_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16))) +float16x8_t vcmulq_rot180_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f16))) +float16x8_t vcmulq_rot180_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32))) +float32x4_t vcmulq_rot180_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot180_x_f32))) +float32x4_t vcmulq_rot180_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16))) +float16x8_t vcmulq_rot270_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f16))) +float16x8_t vcmulq_rot270(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32))) +float32x4_t vcmulq_rot270_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_f32))) +float32x4_t vcmulq_rot270(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16))) +float16x8_t vcmulq_rot270_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f16))) +float16x8_t vcmulq_rot270_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32))) +float32x4_t vcmulq_rot270_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_m_f32))) +float32x4_t vcmulq_rot270_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16))) +float16x8_t vcmulq_rot270_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f16))) +float16x8_t vcmulq_rot270_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32))) +float32x4_t vcmulq_rot270_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot270_x_f32))) +float32x4_t vcmulq_rot270_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16))) +float16x8_t vcmulq_rot90_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f16))) +float16x8_t vcmulq_rot90(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32))) +float32x4_t vcmulq_rot90_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_f32))) +float32x4_t vcmulq_rot90(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16))) +float16x8_t vcmulq_rot90_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f16))) +float16x8_t vcmulq_rot90_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32))) +float32x4_t vcmulq_rot90_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_m_f32))) +float32x4_t vcmulq_rot90_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16))) +float16x8_t vcmulq_rot90_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f16))) +float16x8_t vcmulq_rot90_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32))) +float32x4_t vcmulq_rot90_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_rot90_x_f32))) +float32x4_t vcmulq_rot90_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16))) +float16x8_t vcmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f16))) +float16x8_t vcmulq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32))) +float32x4_t vcmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcmulq_x_f32))) +float32x4_t vcmulq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f16))) +float16x8_t vcreateq_f16(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcreateq_f32))) +float32x4_t vcreateq_f32(uint64_t, uint64_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16))) +int16x8_t vcvtaq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s16_f16))) +int16x8_t vcvtaq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32))) +int32x4_t vcvtaq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_s32_f32))) +int32x4_t vcvtaq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16))) +uint16x8_t vcvtaq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u16_f16))) +uint16x8_t 
vcvtaq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32))) +uint32x4_t vcvtaq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_m_u32_f32))) +uint32x4_t vcvtaq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s16_f16))) +int16x8_t vcvtaq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_s32_f32))) +int32x4_t vcvtaq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u16_f16))) +uint16x8_t vcvtaq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_u32_f32))) +uint32x4_t vcvtaq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s16_f16))) +int16x8_t vcvtaq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_s32_f32))) +int32x4_t vcvtaq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u16_f16))) +uint16x8_t vcvtaq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtaq_x_u32_f32))) +uint32x4_t vcvtaq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f16_f32))) +float16x8_t vcvtbq_f16_f32(float16x8_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_f32_f16))) +float32x4_t vcvtbq_f32_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f16_f32))) +float16x8_t vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_m_f32_f16))) +float32x4_t vcvtbq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtbq_x_f32_f16))) +float32x4_t vcvtbq_x_f32_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16))) +int16x8_t vcvtmq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s16_f16))) +int16x8_t vcvtmq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32))) +int32x4_t vcvtmq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_s32_f32))) +int32x4_t vcvtmq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16))) +uint16x8_t vcvtmq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u16_f16))) +uint16x8_t vcvtmq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32))) +uint32x4_t vcvtmq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_m_u32_f32))) +uint32x4_t vcvtmq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s16_f16))) +int16x8_t vcvtmq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_s32_f32))) +int32x4_t vcvtmq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u16_f16))) +uint16x8_t vcvtmq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_u32_f32))) +uint32x4_t vcvtmq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s16_f16))) +int16x8_t vcvtmq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_s32_f32))) +int32x4_t vcvtmq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u16_f16))) +uint16x8_t vcvtmq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtmq_x_u32_f32))) +uint32x4_t vcvtmq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16))) +int16x8_t vcvtnq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s16_f16))) +int16x8_t vcvtnq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32))) +int32x4_t vcvtnq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_s32_f32))) +int32x4_t vcvtnq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16))) +uint16x8_t vcvtnq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u16_f16))) +uint16x8_t vcvtnq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32))) +uint32x4_t vcvtnq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_m_u32_f32))) +uint32x4_t vcvtnq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s16_f16))) +int16x8_t vcvtnq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_s32_f32))) +int32x4_t vcvtnq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u16_f16))) +uint16x8_t vcvtnq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_u32_f32))) +uint32x4_t vcvtnq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s16_f16))) +int16x8_t vcvtnq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_s32_f32))) +int32x4_t vcvtnq_x_s32_f32(float32x4_t, mve_pred16_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u16_f16))) +uint16x8_t vcvtnq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtnq_x_u32_f32))) +uint32x4_t vcvtnq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16))) +int16x8_t vcvtpq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s16_f16))) +int16x8_t vcvtpq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32))) +int32x4_t vcvtpq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_s32_f32))) +int32x4_t vcvtpq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16))) +uint16x8_t vcvtpq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u16_f16))) +uint16x8_t vcvtpq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32))) +uint32x4_t vcvtpq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_m_u32_f32))) +uint32x4_t vcvtpq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s16_f16))) +int16x8_t vcvtpq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_s32_f32))) +int32x4_t vcvtpq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u16_f16))) +uint16x8_t vcvtpq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_u32_f32))) +uint32x4_t vcvtpq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s16_f16))) +int16x8_t vcvtpq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_s32_f32))) +int32x4_t vcvtpq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u16_f16))) +uint16x8_t vcvtpq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtpq_x_u32_f32))) +uint32x4_t vcvtpq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16))) +float16x8_t vcvtq_f16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_s16))) +float16x8_t vcvtq(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16))) +float16x8_t vcvtq_f16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f16_u16))) +float16x8_t vcvtq(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32))) +float32x4_t vcvtq_f32_s32(int32x4_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_s32))) +float32x4_t vcvtq(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32))) +float32x4_t vcvtq_f32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_f32_u32))) +float32x4_t vcvtq(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16))) +float16x8_t vcvtq_m_f16_s16(float16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_s16))) +float16x8_t vcvtq_m(float16x8_t, int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16))) +float16x8_t vcvtq_m_f16_u16(float16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f16_u16))) +float16x8_t vcvtq_m(float16x8_t, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32))) +float32x4_t vcvtq_m_f32_s32(float32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_s32))) +float32x4_t vcvtq_m(float32x4_t, int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32))) +float32x4_t vcvtq_m_f32_u32(float32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_f32_u32))) +float32x4_t vcvtq_m(float32x4_t, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16))) +float16x8_t vcvtq_m_n_f16_s16(float16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_s16))) +float16x8_t vcvtq_m_n(float16x8_t, int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16))) +float16x8_t vcvtq_m_n_f16_u16(float16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f16_u16))) +float16x8_t vcvtq_m_n(float16x8_t, uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32))) +float32x4_t vcvtq_m_n_f32_s32(float32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_s32))) +float32x4_t vcvtq_m_n(float32x4_t, int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32))) +float32x4_t vcvtq_m_n_f32_u32(float32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_f32_u32))) +float32x4_t vcvtq_m_n(float32x4_t, uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16))) +int16x8_t vcvtq_m_n_s16_f16(int16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s16_f16))) +int16x8_t vcvtq_m_n(int16x8_t, float16x8_t, int, 
mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32))) +int32x4_t vcvtq_m_n_s32_f32(int32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_s32_f32))) +int32x4_t vcvtq_m_n(int32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16))) +uint16x8_t vcvtq_m_n_u16_f16(uint16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u16_f16))) +uint16x8_t vcvtq_m_n(uint16x8_t, float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32))) +uint32x4_t vcvtq_m_n_u32_f32(uint32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_n_u32_f32))) +uint32x4_t vcvtq_m_n(uint32x4_t, float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16))) +int16x8_t vcvtq_m_s16_f16(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s16_f16))) +int16x8_t vcvtq_m(int16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32))) +int32x4_t vcvtq_m_s32_f32(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_s32_f32))) +int32x4_t vcvtq_m(int32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16))) +uint16x8_t vcvtq_m_u16_f16(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u16_f16))) +uint16x8_t vcvtq_m(uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32))) +uint32x4_t vcvtq_m_u32_f32(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_m_u32_f32))) +uint32x4_t vcvtq_m(uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16))) +float16x8_t vcvtq_n_f16_s16(int16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_s16))) +float16x8_t vcvtq_n(int16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16))) +float16x8_t vcvtq_n_f16_u16(uint16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f16_u16))) +float16x8_t vcvtq_n(uint16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32))) +float32x4_t vcvtq_n_f32_s32(int32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_s32))) +float32x4_t vcvtq_n(int32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32))) +float32x4_t vcvtq_n_f32_u32(uint32x4_t, int); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_f32_u32))) +float32x4_t vcvtq_n(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s16_f16))) +int16x8_t vcvtq_n_s16_f16(float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_s32_f32))) +int32x4_t vcvtq_n_s32_f32(float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u16_f16))) +uint16x8_t vcvtq_n_u16_f16(float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_n_u32_f32))) +uint32x4_t vcvtq_n_u32_f32(float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s16_f16))) +int16x8_t vcvtq_s16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_s32_f32))) +int32x4_t vcvtq_s32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u16_f16))) +uint16x8_t vcvtq_u16_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_u32_f32))) +uint32x4_t vcvtq_u32_f32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16))) +float16x8_t vcvtq_x_f16_s16(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_s16))) +float16x8_t vcvtq_x(int16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16))) +float16x8_t vcvtq_x_f16_u16(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f16_u16))) +float16x8_t vcvtq_x(uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32))) +float32x4_t vcvtq_x_f32_s32(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_s32))) +float32x4_t vcvtq_x(int32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32))) +float32x4_t vcvtq_x_f32_u32(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_f32_u32))) +float32x4_t vcvtq_x(uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16))) +float16x8_t vcvtq_x_n_f16_s16(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_s16))) +float16x8_t vcvtq_x_n(int16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16))) +float16x8_t vcvtq_x_n_f16_u16(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f16_u16))) +float16x8_t vcvtq_x_n(uint16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32))) +float32x4_t vcvtq_x_n_f32_s32(int32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_s32))) +float32x4_t vcvtq_x_n(int32x4_t, int, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32))) +float32x4_t vcvtq_x_n_f32_u32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_f32_u32))) +float32x4_t vcvtq_x_n(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s16_f16))) +int16x8_t vcvtq_x_n_s16_f16(float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_s32_f32))) +int32x4_t vcvtq_x_n_s32_f32(float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u16_f16))) +uint16x8_t vcvtq_x_n_u16_f16(float16x8_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_n_u32_f32))) +uint32x4_t vcvtq_x_n_u32_f32(float32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s16_f16))) +int16x8_t vcvtq_x_s16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_s32_f32))) +int32x4_t vcvtq_x_s32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u16_f16))) +uint16x8_t vcvtq_x_u16_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvtq_x_u32_f32))) +uint32x4_t vcvtq_x_u32_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f16_f32))) +float16x8_t vcvttq_f16_f32(float16x8_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_f32_f16))) +float32x4_t vcvttq_f32_f16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f16_f32))) +float16x8_t vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_m_f32_f16))) +float32x4_t vcvttq_m_f32_f16(float32x4_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vcvttq_x_f32_f16))) +float32x4_t vcvttq_x_f32_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16))) +float16x8_t vdupq_m_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f16))) +float16x8_t vdupq_m(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32))) +float32x4_t vdupq_m_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vdupq_m_n_f32))) +float32x4_t vdupq_m(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f16))) +float16x8_t vdupq_n_f16(float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_n_f32))) +float32x4_t vdupq_n_f32(float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f16))) +float16x8_t vdupq_x_n_f16(float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vdupq_x_n_f32))) +float32x4_t 
vdupq_x_n_f32(float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16))) +float16x8_t veorq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f16))) +float16x8_t veorq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32))) +float32x4_t veorq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_f32))) +float32x4_t veorq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16))) +float16x8_t veorq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f16))) +float16x8_t veorq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32))) +float32x4_t veorq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_m_f32))) +float32x4_t veorq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16))) +float16x8_t veorq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f16))) +float16x8_t veorq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32))) +float32x4_t veorq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_veorq_x_f32))) +float32x4_t veorq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16))) +float16x8_t vfmaq_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f16))) +float16x8_t vfmaq(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32))) +float32x4_t vfmaq_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_f32))) +float32x4_t vfmaq(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16))) +float16x8_t vfmaq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f16))) +float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32))) +float32x4_t vfmaq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_f32))) +float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16))) +float16x8_t vfmaq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static 
__inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f16))) +float16x8_t vfmaq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32))) +float32x4_t vfmaq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_m_n_f32))) +float32x4_t vfmaq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16))) +float16x8_t vfmaq_n_f16(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f16))) +float16x8_t vfmaq(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32))) +float32x4_t vfmaq_n_f32(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmaq_n_f32))) +float32x4_t vfmaq(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16))) +float16x8_t vfmasq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f16))) +float16x8_t vfmasq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32))) +float32x4_t vfmasq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_m_n_f32))) +float32x4_t vfmasq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16))) +float16x8_t vfmasq_n_f16(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f16))) +float16x8_t vfmasq(float16x8_t, float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32))) +float32x4_t vfmasq_n_f32(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmasq_n_f32))) +float32x4_t vfmasq(float32x4_t, float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16))) +float16x8_t vfmsq_f16(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f16))) +float16x8_t vfmsq(float16x8_t, float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32))) +float32x4_t vfmsq_f32(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_f32))) +float32x4_t vfmsq(float32x4_t, float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16))) +float16x8_t vfmsq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f16))) +float16x8_t vfmsq_m(float16x8_t, float16x8_t, 
float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32))) +float32x4_t vfmsq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vfmsq_m_f32))) +float32x4_t vfmsq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16))) +float16_t vgetq_lane_f16(float16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f16))) +float16_t vgetq_lane(float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32))) +float32_t vgetq_lane_f32(float32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vgetq_lane_f32))) +float32_t vgetq_lane(float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16))) +float16x8_t vld1q_f16(const float16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f16))) +float16x8_t vld1q(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32))) +float32x4_t vld1q_f32(const float32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_f32))) +float32x4_t vld1q(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16))) +float16x8_t vld1q_z_f16(const float16_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f16))) +float16x8_t vld1q_z(const float16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32))) +float32x4_t vld1q_z_f32(const float32_t *, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld1q_z_f32))) +float32x4_t vld1q_z(const float32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16))) +float16x8x2_t vld2q_f16(const float16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f16))) +float16x8x2_t vld2q(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32))) +float32x4x2_t vld2q_f32(const float32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld2q_f32))) +float32x4x2_t vld2q(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16))) +float16x8x4_t vld4q_f16(const float16_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f16))) +float16x8x4_t vld4q(const float16_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32))) +float32x4x4_t vld4q_f32(const float32_t *); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vld4q_f32))) +float32x4x4_t vld4q(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_f16))) +float16x8_t vldrhq_f16(const float16_t *); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16))) +float16x8_t vldrhq_gather_offset_f16(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_f16))) +float16x8_t vldrhq_gather_offset(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16))) +float16x8_t vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16))) +float16x8_t vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16))) +float16x8_t vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16))) +float16x8_t vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16))) +float16x8_t vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16))) +float16x8_t vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrhq_z_f16))) +float16x8_t vldrhq_z_f16(const float16_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_f32))) +float32x4_t vldrwq_f32(const float32_t *); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_f32))) +float32x4_t vldrwq_gather_base_f32(uint32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32))) +float32x4_t vldrwq_gather_base_wb_f32(uint32x4_t *, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32))) +float32x4_t vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32))) +float32x4_t vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32))) +float32x4_t vldrwq_gather_offset_f32(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_f32))) +float32x4_t vldrwq_gather_offset(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32))) +float32x4_t vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32))) +float32x4_t vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32))) +float32x4_t vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32))) +float32x4_t vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32))) +float32x4_t vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32))) +float32x4_t vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vldrwq_z_f32))) +float32x4_t vldrwq_z_f32(const float32_t *, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16))) +float16x8_t vmaxnmaq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f16))) +float16x8_t vmaxnmaq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32))) +float32x4_t vmaxnmaq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_f32))) +float32x4_t vmaxnmaq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16))) +float16x8_t vmaxnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f16))) +float16x8_t vmaxnmaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32))) +float32x4_t vmaxnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmaq_m_f32))) +float32x4_t vmaxnmaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16))) +float16_t vmaxnmavq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f16))) +float16_t vmaxnmavq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32))) +float32_t vmaxnmavq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_f32))) +float32_t vmaxnmavq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16))) +float16_t vmaxnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f16))) +float16_t vmaxnmavq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32))) +float32_t vmaxnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmavq_p_f32))) +float32_t vmaxnmavq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16))) +float16x8_t vmaxnmq_f16(float16x8_t, float16x8_t); +static __inline__ 
__attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f16))) +float16x8_t vmaxnmq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32))) +float32x4_t vmaxnmq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_f32))) +float32x4_t vmaxnmq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16))) +float16x8_t vmaxnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f16))) +float16x8_t vmaxnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32))) +float32x4_t vmaxnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_m_f32))) +float32x4_t vmaxnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16))) +float16x8_t vmaxnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f16))) +float16x8_t vmaxnmq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32))) +float32x4_t vmaxnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmq_x_f32))) +float32x4_t vmaxnmq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16))) +float16_t vmaxnmvq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f16))) +float16_t vmaxnmvq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32))) +float32_t vmaxnmvq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_f32))) +float32_t vmaxnmvq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16))) +float16_t vmaxnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f16))) +float16_t vmaxnmvq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32))) +float32_t vmaxnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmaxnmvq_p_f32))) +float32_t vmaxnmvq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16))) +float16x8_t vminnmaq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f16))) +float16x8_t vminnmaq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32))) +float32x4_t 
vminnmaq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_f32))) +float32x4_t vminnmaq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16))) +float16x8_t vminnmaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f16))) +float16x8_t vminnmaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32))) +float32x4_t vminnmaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmaq_m_f32))) +float32x4_t vminnmaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16))) +float16_t vminnmavq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f16))) +float16_t vminnmavq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32))) +float32_t vminnmavq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_f32))) +float32_t vminnmavq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16))) +float16_t vminnmavq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f16))) +float16_t vminnmavq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32))) +float32_t vminnmavq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmavq_p_f32))) +float32_t vminnmavq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16))) +float16x8_t vminnmq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f16))) +float16x8_t vminnmq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32))) +float32x4_t vminnmq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_f32))) +float32x4_t vminnmq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16))) +float16x8_t vminnmq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f16))) +float16x8_t vminnmq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32))) +float32x4_t vminnmq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_m_f32))) +float32x4_t vminnmq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16))) +float16x8_t vminnmq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f16))) +float16x8_t vminnmq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32))) +float32x4_t vminnmq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmq_x_f32))) +float32x4_t vminnmq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16))) +float16_t vminnmvq_f16(float16_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f16))) +float16_t vminnmvq(float16_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32))) +float32_t vminnmvq_f32(float32_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_f32))) +float32_t vminnmvq(float32_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16))) +float16_t vminnmvq_p_f16(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f16))) +float16_t vminnmvq_p(float16_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32))) +float32_t vminnmvq_p_f32(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vminnmvq_p_f32))) +float32_t vminnmvq_p(float32_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16))) +float16x8_t vmulq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f16))) +float16x8_t vmulq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32))) +float32x4_t vmulq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_f32))) +float32x4_t vmulq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16))) +float16x8_t vmulq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f16))) +float16x8_t vmulq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32))) +float32x4_t vmulq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_f32))) +float32x4_t vmulq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16))) +float16x8_t vmulq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f16))) +float16x8_t 
vmulq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32))) +float32x4_t vmulq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_m_n_f32))) +float32x4_t vmulq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16))) +float16x8_t vmulq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f16))) +float16x8_t vmulq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32))) +float32x4_t vmulq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_n_f32))) +float32x4_t vmulq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16))) +float16x8_t vmulq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f16))) +float16x8_t vmulq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32))) +float32x4_t vmulq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_f32))) +float32x4_t vmulq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16))) +float16x8_t vmulq_x_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f16))) +float16x8_t vmulq_x(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32))) +float32x4_t vmulq_x_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vmulq_x_n_f32))) +float32x4_t vmulq_x(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16))) +float16x8_t vnegq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f16))) +float16x8_t vnegq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32))) +float32x4_t vnegq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_f32))) +float32x4_t vnegq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16))) +float16x8_t vnegq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f16))) +float16x8_t vnegq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32))) +float32x4_t vnegq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_m_f32))) +float32x4_t vnegq_m(float32x4_t, float32x4_t, mve_pred16_t); 
+static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16))) +float16x8_t vnegq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f16))) +float16x8_t vnegq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32))) +float32x4_t vnegq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vnegq_x_f32))) +float32x4_t vnegq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16))) +float16x8_t vornq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f16))) +float16x8_t vornq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32))) +float32x4_t vornq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_f32))) +float32x4_t vornq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16))) +float16x8_t vornq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f16))) +float16x8_t vornq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32))) +float32x4_t vornq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_m_f32))) +float32x4_t vornq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16))) +float16x8_t vornq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f16))) +float16x8_t vornq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32))) +float32x4_t vornq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vornq_x_f32))) +float32x4_t vornq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16))) +float16x8_t vorrq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f16))) +float16x8_t vorrq(float16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32))) +float32x4_t vorrq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_f32))) +float32x4_t vorrq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16))) +float16x8_t vorrq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f16))) +float16x8_t vorrq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32))) +float32x4_t vorrq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_m_f32))) +float32x4_t vorrq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16))) +float16x8_t vorrq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f16))) +float16x8_t vorrq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32))) +float32x4_t vorrq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vorrq_x_f32))) +float32x4_t vorrq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16))) +float16x8_t vpselq_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f16))) +float16x8_t vpselq(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32))) +float32x4_t vpselq_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vpselq_f32))) +float32x4_t vpselq(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32))) +float16x8_t vreinterpretq_f16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_f32))) +float16x8_t vreinterpretq_f16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16))) +float16x8_t vreinterpretq_f16_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s16))) +float16x8_t vreinterpretq_f16(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32))) +float16x8_t vreinterpretq_f16_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s32))) +float16x8_t vreinterpretq_f16(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64))) +float16x8_t vreinterpretq_f16_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s64))) +float16x8_t vreinterpretq_f16(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8))) +float16x8_t vreinterpretq_f16_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_s8))) +float16x8_t vreinterpretq_f16(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16))) +float16x8_t vreinterpretq_f16_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u16))) +float16x8_t vreinterpretq_f16(uint16x8_t); +static 
__inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32))) +float16x8_t vreinterpretq_f16_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u32))) +float16x8_t vreinterpretq_f16(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64))) +float16x8_t vreinterpretq_f16_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u64))) +float16x8_t vreinterpretq_f16(uint64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t vreinterpretq_f16_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8))) +float16x8_t vreinterpretq_f16(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16))) +float32x4_t vreinterpretq_f32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_f16))) +float32x4_t vreinterpretq_f32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16))) +float32x4_t vreinterpretq_f32_s16(int16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s16))) +float32x4_t vreinterpretq_f32(int16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32))) +float32x4_t vreinterpretq_f32_s32(int32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s32))) +float32x4_t vreinterpretq_f32(int32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64))) +float32x4_t vreinterpretq_f32_s64(int64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s64))) +float32x4_t vreinterpretq_f32(int64x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8))) +float32x4_t vreinterpretq_f32_s8(int8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_s8))) +float32x4_t vreinterpretq_f32(int8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16))) +float32x4_t vreinterpretq_f32_u16(uint16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u16))) +float32x4_t vreinterpretq_f32(uint16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32))) +float32x4_t vreinterpretq_f32_u32(uint32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u32))) +float32x4_t vreinterpretq_f32(uint32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64))) +float32x4_t vreinterpretq_f32_u64(uint64x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u64))) +float32x4_t vreinterpretq_f32(uint64x2_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t vreinterpretq_f32_u8(uint8x16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8))) +float32x4_t vreinterpretq_f32(uint8x16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16))) +int16x8_t vreinterpretq_s16_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f16))) +int16x8_t vreinterpretq_s16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32))) +int16x8_t vreinterpretq_s16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_f32))) +int16x8_t vreinterpretq_s16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16))) +int32x4_t vreinterpretq_s32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f16))) +int32x4_t vreinterpretq_s32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32))) +int32x4_t vreinterpretq_s32_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_f32))) +int32x4_t vreinterpretq_s32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16))) +int64x2_t vreinterpretq_s64_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f16))) +int64x2_t vreinterpretq_s64(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32))) +int64x2_t vreinterpretq_s64_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_f32))) +int64x2_t vreinterpretq_s64(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16))) +int8x16_t vreinterpretq_s8_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f16))) +int8x16_t vreinterpretq_s8(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32))) +int8x16_t vreinterpretq_s8_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_f32))) +int8x16_t vreinterpretq_s8(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16))) +uint16x8_t vreinterpretq_u16_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f16))) +uint16x8_t vreinterpretq_u16(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32))) +uint16x8_t vreinterpretq_u16_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_f32))) +uint16x8_t vreinterpretq_u16(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16))) +uint32x4_t 
vreinterpretq_u32_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f16))) +uint32x4_t vreinterpretq_u32(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32))) +uint32x4_t vreinterpretq_u32_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_f32))) +uint32x4_t vreinterpretq_u32(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16))) +uint64x2_t vreinterpretq_u64_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f16))) +uint64x2_t vreinterpretq_u64(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32))) +uint64x2_t vreinterpretq_u64_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_f32))) +uint64x2_t vreinterpretq_u64(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t vreinterpretq_u8_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16))) +uint8x16_t vreinterpretq_u8(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t vreinterpretq_u8_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32))) +uint8x16_t vreinterpretq_u8(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16))) +float16x8_t vrev32q_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_f16))) +float16x8_t vrev32q(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16))) +float16x8_t vrev32q_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_m_f16))) +float16x8_t vrev32q_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16))) +float16x8_t vrev32q_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev32q_x_f16))) +float16x8_t vrev32q_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16))) +float16x8_t vrev64q_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f16))) +float16x8_t vrev64q(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32))) +float32x4_t vrev64q_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_f32))) +float32x4_t vrev64q(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16))) +float16x8_t vrev64q_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f16))) +float16x8_t 
vrev64q_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32))) +float32x4_t vrev64q_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_m_f32))) +float32x4_t vrev64q_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16))) +float16x8_t vrev64q_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f16))) +float16x8_t vrev64q_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32))) +float32x4_t vrev64q_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrev64q_x_f32))) +float32x4_t vrev64q_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16))) +float16x8_t vrndaq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f16))) +float16x8_t vrndaq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32))) +float32x4_t vrndaq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_f32))) +float32x4_t vrndaq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16))) +float16x8_t vrndaq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f16))) +float16x8_t vrndaq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32))) +float32x4_t vrndaq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_m_f32))) +float32x4_t vrndaq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16))) +float16x8_t vrndaq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f16))) +float16x8_t vrndaq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32))) +float32x4_t vrndaq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndaq_x_f32))) +float32x4_t vrndaq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16))) +float16x8_t vrndmq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f16))) +float16x8_t vrndmq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32))) +float32x4_t vrndmq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_f32))) +float32x4_t vrndmq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16))) +float16x8_t vrndmq_m_f16(float16x8_t, float16x8_t, 
mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f16))) +float16x8_t vrndmq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32))) +float32x4_t vrndmq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_m_f32))) +float32x4_t vrndmq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16))) +float16x8_t vrndmq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f16))) +float16x8_t vrndmq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32))) +float32x4_t vrndmq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndmq_x_f32))) +float32x4_t vrndmq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16))) +float16x8_t vrndnq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f16))) +float16x8_t vrndnq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32))) +float32x4_t vrndnq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_f32))) +float32x4_t vrndnq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16))) +float16x8_t vrndnq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f16))) +float16x8_t vrndnq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32))) +float32x4_t vrndnq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_m_f32))) +float32x4_t vrndnq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16))) +float16x8_t vrndnq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f16))) +float16x8_t vrndnq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32))) +float32x4_t vrndnq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndnq_x_f32))) +float32x4_t vrndnq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16))) +float16x8_t vrndpq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f16))) +float16x8_t vrndpq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32))) +float32x4_t vrndpq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_f32))) +float32x4_t vrndpq(float32x4_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16))) +float16x8_t vrndpq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f16))) +float16x8_t vrndpq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32))) +float32x4_t vrndpq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_m_f32))) +float32x4_t vrndpq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16))) +float16x8_t vrndpq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f16))) +float16x8_t vrndpq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32))) +float32x4_t vrndpq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndpq_x_f32))) +float32x4_t vrndpq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16))) +float16x8_t vrndq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f16))) +float16x8_t vrndq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32))) +float32x4_t vrndq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_f32))) +float32x4_t vrndq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16))) +float16x8_t vrndq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f16))) +float16x8_t vrndq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32))) +float32x4_t vrndq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_m_f32))) +float32x4_t vrndq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16))) +float16x8_t vrndq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f16))) +float16x8_t vrndq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32))) +float32x4_t vrndq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndq_x_f32))) +float32x4_t vrndq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16))) +float16x8_t vrndxq_f16(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f16))) +float16x8_t vrndxq(float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32))) +float32x4_t vrndxq_f32(float32x4_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_f32))) +float32x4_t vrndxq(float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16))) +float16x8_t vrndxq_m_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f16))) +float16x8_t vrndxq_m(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32))) +float32x4_t vrndxq_m_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_m_f32))) +float32x4_t vrndxq_m(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16))) +float16x8_t vrndxq_x_f16(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f16))) +float16x8_t vrndxq_x(float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32))) +float32x4_t vrndxq_x_f32(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vrndxq_x_f32))) +float32x4_t vrndxq_x(float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16))) +float16x8_t vsetq_lane_f16(float16_t, float16x8_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f16))) +float16x8_t vsetq_lane(float16_t, float16x8_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32))) +float32x4_t vsetq_lane_f32(float32_t, float32x4_t, int); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsetq_lane_f32))) +float32x4_t vsetq_lane(float32_t, float32x4_t, int); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16))) +void vst1q_f16(float16_t *, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f16))) +void vst1q(float16_t *, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32))) +void vst1q_f32(float32_t *, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_f32))) +void vst1q(float32_t *, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16))) +void vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f16))) +void vst1q_p(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32))) +void vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst1q_p_f32))) +void vst1q_p(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16))) +void vst2q_f16(float16_t *, float16x8x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f16))) +void vst2q(float16_t *, float16x8x2_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32))) +void vst2q_f32(float32_t *, float32x4x2_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst2q_f32))) +void vst2q(float32_t *, float32x4x2_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16))) +void vst4q_f16(float16_t *, float16x8x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f16))) +void vst4q(float16_t *, float16x8x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32))) +void vst4q_f32(float32_t *, float32x4x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vst4q_f32))) +void vst4q(float32_t *, float32x4x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16))) +void vstrhq_f16(float16_t *, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_f16))) +void vstrhq(float16_t *, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16))) +void vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_p_f16))) +void vstrhq_p(float16_t *, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16))) +void vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16))) +void vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16))) +void vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16))) +void vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16))) +void vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16))) +void vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16))) +void vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16))) +void vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32))) +void vstrwq_f32(float32_t *, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_f32))) +void vstrwq(float32_t *, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32))) +void vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_p_f32))) +void vstrwq_p(float32_t *, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32))) +void vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_f32))) +void vstrwq_scatter_base(uint32x4_t, int, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32))) +void vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32))) +void vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32))) +void vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32))) +void vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32))) +void vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32))) +void vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32))) +void vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32))) +void vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32))) +void vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32))) +void vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32))) +void vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32))) +void vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32))) +void vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32))) +void vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16))) +float16x8_t vsubq_f16(float16x8_t, float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f16))) +float16x8_t vsubq(float16x8_t, float16x8_t); +static __inline__ 
__attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32))) +float32x4_t vsubq_f32(float32x4_t, float32x4_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_f32))) +float32x4_t vsubq(float32x4_t, float32x4_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16))) +float16x8_t vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f16))) +float16x8_t vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32))) +float32x4_t vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_f32))) +float32x4_t vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16))) +float16x8_t vsubq_m_n_f16(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f16))) +float16x8_t vsubq_m(float16x8_t, float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32))) +float32x4_t vsubq_m_n_f32(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_m_n_f32))) +float32x4_t vsubq_m(float32x4_t, float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16))) +float16x8_t vsubq_n_f16(float16x8_t, float16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f16))) +float16x8_t vsubq(float16x8_t, float16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32))) +float32x4_t vsubq_n_f32(float32x4_t, float32_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_n_f32))) +float32x4_t vsubq(float32x4_t, float32_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16))) +float16x8_t vsubq_x_f16(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f16))) +float16x8_t vsubq_x(float16x8_t, float16x8_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32))) +float32x4_t vsubq_x_f32(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_f32))) +float32x4_t vsubq_x(float32x4_t, float32x4_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16))) +float16x8_t vsubq_x_n_f16(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f16))) +float16x8_t vsubq_x(float16x8_t, float16_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32))) +float32x4_t vsubq_x_n_f32(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__overloadable__, 
__clang_arm_builtin_alias(__builtin_arm_mve_vsubq_x_n_f32))) +float32x4_t vsubq_x(float32x4_t, float32_t, mve_pred16_t); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f16))) +float16x8_t vuninitializedq_f16(); +static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_f32))) +float32x4_t vuninitializedq_f32(); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16))) +float16x8_t vuninitializedq(float16x8_t); +static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32))) +float32x4_t vuninitializedq(float32x4_t); + +#endif /* (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* __ARM_MVE_H */ diff --git a/third_party/aarch64/clang/arm_neon.h b/third_party/aarch64/clang/arm_neon.h new file mode 100644 index 000000000..b67616134 --- /dev/null +++ b/third_party/aarch64/clang/arm_neon.h @@ -0,0 +1,69638 @@ +/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_NEON_H +#define __ARM_NEON_H + +#ifndef __ARM_FP +#error "NEON intrinsics not available with the soft-float ABI. 
Please use -mfloat-abi=softfp or -mfloat-abi=hard" +#else + +#include <stdint.h> + +#include <arm_bf16.h> +#include <arm_vector_types.h> +#if defined(__aarch64__) || defined(__arm64ec__) +typedef uint8_t poly8_t; +typedef uint16_t poly16_t; +typedef uint64_t poly64_t; +typedef __uint128_t poly128_t; +#else +typedef int8_t poly8_t; +typedef int16_t poly16_t; +typedef int64_t poly64_t; +#endif +typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t; +typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t; +typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t; +typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t; +typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t; +typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t; + +typedef struct poly8x8x2_t { + poly8x8_t val[2]; +} poly8x8x2_t; + +typedef struct poly8x16x2_t { + poly8x16_t val[2]; +} poly8x16x2_t; + +typedef struct poly16x4x2_t { + poly16x4_t val[2]; +} poly16x4x2_t; + +typedef struct poly16x8x2_t { + poly16x8_t val[2]; +} poly16x8x2_t; + +typedef struct poly64x1x2_t { + poly64x1_t val[2]; +} poly64x1x2_t; + +typedef struct poly64x2x2_t { + poly64x2_t val[2]; +} poly64x2x2_t; + +typedef struct poly8x8x3_t { + poly8x8_t val[3]; +} poly8x8x3_t; + +typedef struct poly8x16x3_t { + poly8x16_t val[3]; +} poly8x16x3_t; + +typedef struct poly16x4x3_t { + poly16x4_t val[3]; +} poly16x4x3_t; + +typedef struct poly16x8x3_t { + poly16x8_t val[3]; +} poly16x8x3_t; + +typedef struct poly64x1x3_t { + poly64x1_t val[3]; +} poly64x1x3_t; + +typedef struct poly64x2x3_t { + poly64x2_t val[3]; +} poly64x2x3_t; + +typedef struct poly8x8x4_t { + poly8x8_t val[4]; +} poly8x8x4_t; + +typedef struct poly8x16x4_t { + poly8x16_t val[4]; +} poly8x16x4_t; + +typedef struct poly16x4x4_t { + poly16x4_t val[4]; +} poly16x4x4_t; + +typedef struct poly16x8x4_t { + poly16x8_t val[4]; +} poly16x8x4_t; + +typedef struct poly64x1x4_t { + poly64x1_t val[4]; +} poly64x1x4_t; + +typedef struct poly64x2x4_t { + poly64x2_t val[4]; +} poly64x2x4_t; + +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) +#else +#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__rev0, __p1, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) +#else +#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__rev0, __p1, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 
0); \ + __ret; \ +}) +#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) +#else +#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) +#else +#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x2_t vbfdot_f32(float32x2_t __p0, 
bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t 
vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#define vcreate_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (bfloat16x4_t)(__promote); \ + __ret; \ +}) +__ai __attribute__((target("bf16,neon"))) float32_t vcvtah_f32_bf16(bfloat16_t __p0) { + float32_t __ret; +bfloat16_t __reint = __p0; +int32_t __reint1 = (int32_t)(*(int16_t *) &__reint) << 16; + __ret = *(float32_t *) &__reint1; + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16_t vcvth_bf16_f32(float32_t __p0) { + bfloat16_t __ret; + __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_bf16(__p0_0, __p1_0) __extension__ ({ \ + bfloat16x8_t __ret_0; \ + bfloat16x4_t __s0_0 = __p0_0; \ + __ret_0 = splatq_lane_bf16(__s0_0, __p1_0); \ + __ret_0; \ +}) +#else +#define vdupq_lane_bf16(__p0_1, __p1_1) __extension__ ({ \ + bfloat16x8_t __ret_1; \ + bfloat16x4_t __s0_1 = __p0_1; \ + bfloat16x4_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 3, 2, 1, 0); \ + __ret_1 = __noswap_splatq_lane_bf16(__rev0_1, __p1_1); \ + __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_1; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_bf16(__p0_2, __p1_2) __extension__ ({ \ + bfloat16x4_t __ret_2; \ + bfloat16x4_t __s0_2 = __p0_2; \ + __ret_2 = splat_lane_bf16(__s0_2, __p1_2); \ + __ret_2; \ +}) +#else +#define vdup_lane_bf16(__p0_3, __p1_3) __extension__ ({ \ + bfloat16x4_t __ret_3; \ + 
bfloat16x4_t __s0_3 = __p0_3; \ + bfloat16x4_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \ + __ret_3 = __noswap_splat_lane_bf16(__rev0_3, __p1_3); \ + __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \ + __ret_3; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_bf16(__p0_4, __p1_4) __extension__ ({ \ + bfloat16x8_t __ret_4; \ + bfloat16x8_t __s0_4 = __p0_4; \ + __ret_4 = splatq_laneq_bf16(__s0_4, __p1_4); \ + __ret_4; \ +}) +#else +#define vdupq_laneq_bf16(__p0_5, __p1_5) __extension__ ({ \ + bfloat16x8_t __ret_5; \ + bfloat16x8_t __s0_5 = __p0_5; \ + bfloat16x8_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_5 = __noswap_splatq_laneq_bf16(__rev0_5, __p1_5); \ + __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_5; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_bf16(__p0_6, __p1_6) __extension__ ({ \ + bfloat16x4_t __ret_6; \ + bfloat16x8_t __s0_6 = __p0_6; \ + __ret_6 = splat_laneq_bf16(__s0_6, __p1_6); \ + __ret_6; \ +}) +#else +#define vdup_laneq_bf16(__p0_7, __p1_7) __extension__ ({ \ + bfloat16x4_t __ret_7; \ + bfloat16x8_t __s0_7 = __p0_7; \ + bfloat16x8_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_7 = __noswap_splat_laneq_bf16(__rev0_7, __p1_7); \ + __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 3, 2, 1, 0); \ + __ret_7; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 
43); \ + __ret; \ +}) +#else +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ + __ret; \ +}) +#else +#define vld1_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ + __ret; \ +}) +#else +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ + __ret; \ +}) +#else +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 
7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ + __ret; \ +}) +#else +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ + __ret; \ +}) +#else +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + 
bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ + __ret; \ +}) +#else +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ + __ret; \ +}) +#else +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, 
__p0, 43); \ + __ret; \ +}) +#else +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ + __ret; \ +}) +#else +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ + __ret; \ +}) +#else +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ + __ret; \ +}) +#else +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 
(int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ + __ret; \ +}) +#else +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + 
bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__s1, 43); \ +}) +#else +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__rev1, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__s1, 11); \ +}) +#else +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__rev1, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ +}) +#else +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ +}) +#else +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ +}) +#else +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ +}) +#else +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ +}) +#else +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) +#else +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) +#else +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 
__p2, 43); \ +}) +#else +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ +}) +#else +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ +}) +#else +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 
7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ +}) +#else +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod,neon"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("dotprod,neon"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod,neon"))) uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod,neon"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("dotprod,neon"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai 
__attribute__((target("dotprod,neon"))) int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod,neon"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#else +__ai __attribute__((target("dotprod,neon"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod,neon"))) uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod,neon"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("dotprod,neon"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("dotprod,neon"))) int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else 
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__rev0, 
(int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
(uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + 
uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t 
vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcltz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcltz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 
0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) 
__builtin_neon_vcvtq_n_f16_s16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x4_t 
vcvta_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__p0, 17); + 
return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { + int16x4_t 
__ret; + __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16,neon"))) float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, 
__p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16,neon"))) float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = vfmaq_f16(__p0, -__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = vfma_f16(__p0, -__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret; \ +}) +#else +#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + 
float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret; \ +}) +#else +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) 
__builtin_neon_vpmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("fullfp16,neon"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm,neon"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vmmlaq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("i8mm,neon"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm,neon"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("i8mm,neon"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm,neon"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("i8mm,neon"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("i8mm,neon"))) int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm,neon"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("i8mm,neon"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("i8mm,neon"))) int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("i8mm,neon"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("i8mm,neon"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#else +#define splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#endif + +#define splat_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#else +#define splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#else +#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ + 
poly8x16_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) +#else +#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#else +#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u32(__p0, 
__p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#else +#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) +#else +#define splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#else +#define splatq_lane_f32(__p0, __p1) 
__extension__ ({ \ + float32x4_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#else +#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#else +#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = 
(int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#define splat_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#define splat_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t 
__ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#else +#define splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#else +#define splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#define splat_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_p8(__p0, __p1) 
__extension__ ({ \ + poly8x8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#else +#define splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#else +#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \ + __ret; \ +}) +#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#else +#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#else +#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#else +#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t 
__ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#else +#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#else +#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#else +#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = 
(float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#else +#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x8_t) 
__builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \ + __ret; \ +}) +#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) 
__builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#else +#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \ + __ret; \ +}) +#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#else +#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#else +#define splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \ + __ret; \ +}) +#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, 
(int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai 
__attribute__((target("neon"))) uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) 
__builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) 
int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vaddq_s8(int8x16_t 
__p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x1_t __ret; + __ret = (poly64x1_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 6); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 38); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} 
+#else +__ai __attribute__((target("neon"))) poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return 
__ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + 
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
__rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { + poly8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { + poly16x4_t __ret; + __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { + poly16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 
0); + poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { + poly8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { + poly16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t 
vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t 
vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return 
__ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = 
(uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vceqq_p8(poly8x16_t 
__p0, poly8x16_t __p1) { + uint8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) 
uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); 
+ __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= 
__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vclsq_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vclsq_u8(uint8x16_t __p0) { + int8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vclsq_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vclsq_u32(uint32x4_t __p0) { + int32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vclsq_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vclsq_u16(uint16x8_t __p0) { + int16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) 
__builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vcls_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vcls_u8(uint8x8_t __p0) { + int8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vcls_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vcls_u32(uint32x2_t __p0) { + int32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vcls_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vcls_u16(uint16x4_t __p0) { + int16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = 
(uint8x16_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) 
uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vclzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vclzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vclzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vclzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vclzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vclzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vclzq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) 
int8x16_t vclzq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vclzq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vclzq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vclzq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vclzq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vclz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vclz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vclz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vclz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vclz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vclz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vclz_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vclz_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) 
__builtin_neon_vclz_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vclz_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vclz_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vclz_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vclz_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcntq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcntq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vcnt_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vcnt_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x16_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x8_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai 
__attribute__((target("neon"))) uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#define vcreate_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly8x8_t)(__promote); \ + __ret; \ +}) +#define vcreate_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly16x4_t)(__promote); \ + __ret; \ +}) +#define vcreate_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint8x8_t)(__promote); \ + __ret; \ +}) +#define vcreate_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint32x2_t)(__promote); \ + __ret; \ +}) +#define vcreate_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint64x1_t)(__promote); \ + __ret; \ +}) +#define vcreate_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint16x4_t)(__promote); \ + __ret; \ +}) +#define vcreate_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int8x8_t)(__promote); \ + __ret; \ +}) +#define vcreate_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float32x2_t)(__promote); \ + __ret; \ +}) +#define vcreate_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float16x4_t)(__promote); \ + __ret; \ +}) +#define vcreate_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int32x2_t)(__promote); \ + __ret; \ +}) +#define vcreate_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int64x1_t)(__promote); \ + __ret; \ +}) +#define vcreate_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int16x4_t)(__promote); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 
18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vcvtq_n_s32_f32(__p0, __p1) 
__extension__ ({ \ + int32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vcvtq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vcvtq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vcvt_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vcvt_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcvt_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcvt_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_p8(__p0_8, __p1_8) __extension__ ({ \ + poly8x8_t __ret_8; \ + poly8x8_t __s0_8 = __p0_8; \ + __ret_8 = splat_lane_p8(__s0_8, __p1_8); \ + __ret_8; \ +}) +#else +#define vdup_lane_p8(__p0_9, __p1_9) __extension__ ({ \ + poly8x8_t __ret_9; \ + poly8x8_t __s0_9 = __p0_9; \ + poly8x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_9 = __noswap_splat_lane_p8(__rev0_9, __p1_9); \ + __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_9; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_p16(__p0_10, __p1_10) __extension__ ({ \ + poly16x4_t __ret_10; \ + poly16x4_t __s0_10 = __p0_10; \ + __ret_10 = splat_lane_p16(__s0_10, __p1_10); \ + __ret_10; \ +}) +#else +#define vdup_lane_p16(__p0_11, __p1_11) __extension__ ({ \ + poly16x4_t __ret_11; \ + poly16x4_t __s0_11 = __p0_11; \ + poly16x4_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \ + __ret_11 = __noswap_splat_lane_p16(__rev0_11, __p1_11); \ + __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \ + __ret_11; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p8(__p0_12, __p1_12) __extension__ ({ \ + poly8x16_t __ret_12; \ + poly8x8_t __s0_12 = __p0_12; \ + __ret_12 = splatq_lane_p8(__s0_12, __p1_12); \ + __ret_12; \ +}) +#else +#define vdupq_lane_p8(__p0_13, __p1_13) __extension__ ({ \ + poly8x16_t __ret_13; \ + poly8x8_t __s0_13 = __p0_13; \ + poly8x8_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_13 = __noswap_splatq_lane_p8(__rev0_13, __p1_13); \ + __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_13; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p16(__p0_14, __p1_14) __extension__ ({ \ + poly16x8_t __ret_14; \ + poly16x4_t __s0_14 = __p0_14; \ + __ret_14 = splatq_lane_p16(__s0_14, __p1_14); \ + __ret_14; \ +}) +#else +#define vdupq_lane_p16(__p0_15, __p1_15) __extension__ ({ \ + poly16x8_t __ret_15; \ + poly16x4_t __s0_15 = __p0_15; \ + poly16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \ + __ret_15 = __noswap_splatq_lane_p16(__rev0_15, __p1_15); \ + __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_15; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u8(__p0_16, __p1_16) __extension__ ({ \ + uint8x16_t __ret_16; \ + uint8x8_t __s0_16 = __p0_16; \ + __ret_16 = splatq_lane_u8(__s0_16, __p1_16); \ + __ret_16; \ +}) +#else +#define vdupq_lane_u8(__p0_17, __p1_17) __extension__ ({ \ + uint8x16_t __ret_17; \ + uint8x8_t __s0_17 = __p0_17; \ + uint8x8_t __rev0_17; 
__rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_17 = __noswap_splatq_lane_u8(__rev0_17, __p1_17); \ + __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_17; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u32(__p0_18, __p1_18) __extension__ ({ \ + uint32x4_t __ret_18; \ + uint32x2_t __s0_18 = __p0_18; \ + __ret_18 = splatq_lane_u32(__s0_18, __p1_18); \ + __ret_18; \ +}) +#else +#define vdupq_lane_u32(__p0_19, __p1_19) __extension__ ({ \ + uint32x4_t __ret_19; \ + uint32x2_t __s0_19 = __p0_19; \ + uint32x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \ + __ret_19 = __noswap_splatq_lane_u32(__rev0_19, __p1_19); \ + __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \ + __ret_19; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u64(__p0_20, __p1_20) __extension__ ({ \ + uint64x2_t __ret_20; \ + uint64x1_t __s0_20 = __p0_20; \ + __ret_20 = splatq_lane_u64(__s0_20, __p1_20); \ + __ret_20; \ +}) +#else +#define vdupq_lane_u64(__p0_21, __p1_21) __extension__ ({ \ + uint64x2_t __ret_21; \ + uint64x1_t __s0_21 = __p0_21; \ + __ret_21 = __noswap_splatq_lane_u64(__s0_21, __p1_21); \ + __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 1, 0); \ + __ret_21; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_u16(__p0_22, __p1_22) __extension__ ({ \ + uint16x8_t __ret_22; \ + uint16x4_t __s0_22 = __p0_22; \ + __ret_22 = splatq_lane_u16(__s0_22, __p1_22); \ + __ret_22; \ +}) +#else +#define vdupq_lane_u16(__p0_23, __p1_23) __extension__ ({ \ + uint16x8_t __ret_23; \ + uint16x4_t __s0_23 = __p0_23; \ + uint16x4_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 3, 2, 1, 0); \ + __ret_23 = __noswap_splatq_lane_u16(__rev0_23, __p1_23); \ + __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_23; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s8(__p0_24, __p1_24) __extension__ ({ \ + int8x16_t __ret_24; \ + int8x8_t __s0_24 = __p0_24; \ + __ret_24 = splatq_lane_s8(__s0_24, __p1_24); \ + __ret_24; \ +}) +#else +#define vdupq_lane_s8(__p0_25, __p1_25) __extension__ ({ \ + int8x16_t __ret_25; \ + int8x8_t __s0_25 = __p0_25; \ + int8x8_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_25 = __noswap_splatq_lane_s8(__rev0_25, __p1_25); \ + __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_25; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f32(__p0_26, __p1_26) __extension__ ({ \ + float32x4_t __ret_26; \ + float32x2_t __s0_26 = __p0_26; \ + __ret_26 = splatq_lane_f32(__s0_26, __p1_26); \ + __ret_26; \ +}) +#else +#define vdupq_lane_f32(__p0_27, __p1_27) __extension__ ({ \ + float32x4_t __ret_27; \ + float32x2_t __s0_27 = __p0_27; \ + float32x2_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 1, 0); \ + __ret_27 = __noswap_splatq_lane_f32(__rev0_27, __p1_27); \ + __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 3, 2, 1, 0); \ + __ret_27; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f16(__p0_28, __p1_28) __extension__ ({ \ + float16x8_t __ret_28; \ + float16x4_t __s0_28 = __p0_28; \ + __ret_28 = splatq_lane_f16(__s0_28, __p1_28); \ + __ret_28; \ +}) +#else +#define vdupq_lane_f16(__p0_29, __p1_29) __extension__ ({ \ + float16x8_t __ret_29; \ + float16x4_t __s0_29 = __p0_29; \ + 
float16x4_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 3, 2, 1, 0); \ + __ret_29 = __noswap_splatq_lane_f16(__rev0_29, __p1_29); \ + __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_29; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s32(__p0_30, __p1_30) __extension__ ({ \ + int32x4_t __ret_30; \ + int32x2_t __s0_30 = __p0_30; \ + __ret_30 = splatq_lane_s32(__s0_30, __p1_30); \ + __ret_30; \ +}) +#else +#define vdupq_lane_s32(__p0_31, __p1_31) __extension__ ({ \ + int32x4_t __ret_31; \ + int32x2_t __s0_31 = __p0_31; \ + int32x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \ + __ret_31 = __noswap_splatq_lane_s32(__rev0_31, __p1_31); \ + __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 3, 2, 1, 0); \ + __ret_31; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s64(__p0_32, __p1_32) __extension__ ({ \ + int64x2_t __ret_32; \ + int64x1_t __s0_32 = __p0_32; \ + __ret_32 = splatq_lane_s64(__s0_32, __p1_32); \ + __ret_32; \ +}) +#else +#define vdupq_lane_s64(__p0_33, __p1_33) __extension__ ({ \ + int64x2_t __ret_33; \ + int64x1_t __s0_33 = __p0_33; \ + __ret_33 = __noswap_splatq_lane_s64(__s0_33, __p1_33); \ + __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 1, 0); \ + __ret_33; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_s16(__p0_34, __p1_34) __extension__ ({ \ + int16x8_t __ret_34; \ + int16x4_t __s0_34 = __p0_34; \ + __ret_34 = splatq_lane_s16(__s0_34, __p1_34); \ + __ret_34; \ +}) +#else +#define vdupq_lane_s16(__p0_35, __p1_35) __extension__ ({ \ + int16x8_t __ret_35; \ + int16x4_t __s0_35 = __p0_35; \ + int16x4_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 3, 2, 1, 0); \ + __ret_35 = __noswap_splatq_lane_s16(__rev0_35, __p1_35); \ + __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_35; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u8(__p0_36, __p1_36) __extension__ ({ \ + uint8x8_t __ret_36; \ + uint8x8_t __s0_36 = __p0_36; \ + __ret_36 = splat_lane_u8(__s0_36, __p1_36); \ + __ret_36; \ +}) +#else +#define vdup_lane_u8(__p0_37, __p1_37) __extension__ ({ \ + uint8x8_t __ret_37; \ + uint8x8_t __s0_37 = __p0_37; \ + uint8x8_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_37 = __noswap_splat_lane_u8(__rev0_37, __p1_37); \ + __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_37; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u32(__p0_38, __p1_38) __extension__ ({ \ + uint32x2_t __ret_38; \ + uint32x2_t __s0_38 = __p0_38; \ + __ret_38 = splat_lane_u32(__s0_38, __p1_38); \ + __ret_38; \ +}) +#else +#define vdup_lane_u32(__p0_39, __p1_39) __extension__ ({ \ + uint32x2_t __ret_39; \ + uint32x2_t __s0_39 = __p0_39; \ + uint32x2_t __rev0_39; __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 1, 0); \ + __ret_39 = __noswap_splat_lane_u32(__rev0_39, __p1_39); \ + __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 1, 0); \ + __ret_39; \ +}) +#endif + +#define vdup_lane_u64(__p0_40, __p1_40) __extension__ ({ \ + uint64x1_t __ret_40; \ + uint64x1_t __s0_40 = __p0_40; \ + __ret_40 = splat_lane_u64(__s0_40, __p1_40); \ + __ret_40; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_u16(__p0_41, __p1_41) __extension__ ({ \ + uint16x4_t __ret_41; \ + uint16x4_t __s0_41 = __p0_41; \ + __ret_41 = splat_lane_u16(__s0_41, __p1_41); \ + __ret_41; \ +}) +#else +#define 
vdup_lane_u16(__p0_42, __p1_42) __extension__ ({ \ + uint16x4_t __ret_42; \ + uint16x4_t __s0_42 = __p0_42; \ + uint16x4_t __rev0_42; __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, 3, 2, 1, 0); \ + __ret_42 = __noswap_splat_lane_u16(__rev0_42, __p1_42); \ + __ret_42 = __builtin_shufflevector(__ret_42, __ret_42, 3, 2, 1, 0); \ + __ret_42; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s8(__p0_43, __p1_43) __extension__ ({ \ + int8x8_t __ret_43; \ + int8x8_t __s0_43 = __p0_43; \ + __ret_43 = splat_lane_s8(__s0_43, __p1_43); \ + __ret_43; \ +}) +#else +#define vdup_lane_s8(__p0_44, __p1_44) __extension__ ({ \ + int8x8_t __ret_44; \ + int8x8_t __s0_44 = __p0_44; \ + int8x8_t __rev0_44; __rev0_44 = __builtin_shufflevector(__s0_44, __s0_44, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_44 = __noswap_splat_lane_s8(__rev0_44, __p1_44); \ + __ret_44 = __builtin_shufflevector(__ret_44, __ret_44, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_44; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f32(__p0_45, __p1_45) __extension__ ({ \ + float32x2_t __ret_45; \ + float32x2_t __s0_45 = __p0_45; \ + __ret_45 = splat_lane_f32(__s0_45, __p1_45); \ + __ret_45; \ +}) +#else +#define vdup_lane_f32(__p0_46, __p1_46) __extension__ ({ \ + float32x2_t __ret_46; \ + float32x2_t __s0_46 = __p0_46; \ + float32x2_t __rev0_46; __rev0_46 = __builtin_shufflevector(__s0_46, __s0_46, 1, 0); \ + __ret_46 = __noswap_splat_lane_f32(__rev0_46, __p1_46); \ + __ret_46 = __builtin_shufflevector(__ret_46, __ret_46, 1, 0); \ + __ret_46; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_f16(__p0_47, __p1_47) __extension__ ({ \ + float16x4_t __ret_47; \ + float16x4_t __s0_47 = __p0_47; \ + __ret_47 = splat_lane_f16(__s0_47, __p1_47); \ + __ret_47; \ +}) +#else +#define vdup_lane_f16(__p0_48, __p1_48) __extension__ ({ \ + float16x4_t __ret_48; \ + float16x4_t __s0_48 = __p0_48; \ + float16x4_t __rev0_48; __rev0_48 = __builtin_shufflevector(__s0_48, __s0_48, 3, 2, 1, 0); \ + __ret_48 = __noswap_splat_lane_f16(__rev0_48, __p1_48); \ + __ret_48 = __builtin_shufflevector(__ret_48, __ret_48, 3, 2, 1, 0); \ + __ret_48; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s32(__p0_49, __p1_49) __extension__ ({ \ + int32x2_t __ret_49; \ + int32x2_t __s0_49 = __p0_49; \ + __ret_49 = splat_lane_s32(__s0_49, __p1_49); \ + __ret_49; \ +}) +#else +#define vdup_lane_s32(__p0_50, __p1_50) __extension__ ({ \ + int32x2_t __ret_50; \ + int32x2_t __s0_50 = __p0_50; \ + int32x2_t __rev0_50; __rev0_50 = __builtin_shufflevector(__s0_50, __s0_50, 1, 0); \ + __ret_50 = __noswap_splat_lane_s32(__rev0_50, __p1_50); \ + __ret_50 = __builtin_shufflevector(__ret_50, __ret_50, 1, 0); \ + __ret_50; \ +}) +#endif + +#define vdup_lane_s64(__p0_51, __p1_51) __extension__ ({ \ + int64x1_t __ret_51; \ + int64x1_t __s0_51 = __p0_51; \ + __ret_51 = splat_lane_s64(__s0_51, __p1_51); \ + __ret_51; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdup_lane_s16(__p0_52, __p1_52) __extension__ ({ \ + int16x4_t __ret_52; \ + int16x4_t __s0_52 = __p0_52; \ + __ret_52 = splat_lane_s16(__s0_52, __p1_52); \ + __ret_52; \ +}) +#else +#define vdup_lane_s16(__p0_53, __p1_53) __extension__ ({ \ + int16x4_t __ret_53; \ + int16x4_t __s0_53 = __p0_53; \ + int16x4_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \ + __ret_53 = __noswap_splat_lane_s16(__rev0_53, __p1_53); \ + __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \ + __ret_53; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) 
poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + 
__ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = 
(uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vdup_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vdup_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vdup_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + 
uint8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, 
__ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 
3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + 
uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \ + __ret; \ +}) +#else +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vext_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s8(__p0, __p1, __p2) 
__extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \ + __ret; \ +}) +#else +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vext_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ + __ret; \ +}) +#else +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t 
__rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ + __ret; \ +}) +#else +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vget_high_p16(poly16x8_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vget_high_p16(poly16x8_t __p0) { + poly16x4_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; 
+} +#else +__ai __attribute__((target("neon"))) uint32x2_t vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x1_t vget_high_u64(uint64x2_t __p0) { + uint64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x1_t vget_high_u64(uint64x2_t __p0) { + uint64x1_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t 
vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x1_t vget_high_s64(int64x2_t __p0) { + int64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x1_t vget_high_s64(int64x2_t __p0) { + int64x1_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16_t) 
__builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + 
uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else 
+#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#define vget_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#define vget_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vget_low_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vget_low_p8(poly8x16_t __p0) { + poly8x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 
6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vget_low_p16(poly16x8_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vget_low_p16(poly16x8_t __p0) { + poly16x4_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vget_low_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vget_low_u8(uint8x16_t __p0) { + uint8x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vget_low_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vget_low_u32(uint32x4_t __p0) { + uint32x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x1_t vget_low_u64(uint64x2_t __p0) { + uint64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x1_t vget_low_u64(uint64x2_t __p0) { + uint64x1_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vget_low_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vget_low_u16(uint16x8_t __p0) { + uint16x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vget_low_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vget_low_s8(int8x16_t __p0) { + int8x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vget_low_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 
0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vget_low_f32(float32x4_t __p0) { + float32x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vget_low_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vget_low_f16(float16x8_t __p0) { + float16x4_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vget_low_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vget_low_s32(int32x4_t __p0) { + int32x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x1_t vget_low_s64(int64x2_t __p0) { + int64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x1_t vget_low_s64(int64x2_t __p0) { + int64x1_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vget_low_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vget_low_s16(int16x8_t __p0) { + int16x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) 
{ + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) 
__builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16(__p0) __extension__ ({ \ + 
poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16(__p0) __extension__ ({ \ 
+ int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ + __ret; \ +}) +#else +#define vld1_dup_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vld1_dup_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ + __ret; \ +}) +#else +#define vld1_dup_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ + __ret; \ +}) +#else +#define vld1q_dup_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ + __ret; \ +}) +#else +#define vld1q_dup_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ + __ret; \ +}) +#else +#define vld1q_dup_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ + __ret; \ +}) +#else +#define vld1q_dup_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ + __ret; \ +}) +#else +#define vld1q_dup_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ + __ret; \ +}) +#else +#define vld1q_dup_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ + __ret; \ +}) +#else +#define vld1q_dup_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ + __ret; \ +}) +#else +#define vld1q_dup_f32(__p0) __extension__ ({ \ + float32x4_t 
__ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ + __ret; \ +}) +#else +#define vld1q_dup_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ + __ret; \ +}) +#else +#define vld1q_dup_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ + __ret; \ +}) +#else +#define vld1q_dup_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ + __ret; \ +}) +#else +#define vld1_dup_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ + __ret; \ +}) +#else +#define vld1_dup_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_dup_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ + __ret; \ +}) +#else +#define vld1_dup_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ + __ret; \ +}) +#else +#define vld1_dup_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ + __ret; \ +}) +#else +#define vld1_dup_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s32(__p0) 
__extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ + __ret; \ +}) +#else +#define vld1_dup_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_dup_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ + __ret; \ +}) +#else +#define vld1_dup_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t 
__s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ + __ret; \ +}) +#else +#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 
2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + 
uint16x4_t __ret; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ + __ret; \ +}) +#else +#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8_x2(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8_x2(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16_x2(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16_x2(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8_x2(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x2(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x2(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16_x2(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8_x2(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x2(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x2(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32_x2(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64_x2(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x2(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x2(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16_x2(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 
4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8_x2(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8_x2(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32_x2(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32_x2(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32_x2(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32_x2(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64_x2(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x2(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x2(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16_x2(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8_x2(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8_x2(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32_x2(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32_x2(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_u64_x2(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16_x2(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16_x2(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8_x2(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8_x2(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32_x2(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32_x2(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32_x2(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32_x2(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_s64_x2(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16_x2(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16_x2(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8_x3(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8_x3(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16_x3(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16_x3(__p0) __extension__ ({ \ + 
poly16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8_x3(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x3(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x3(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16_x3(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8_x3(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x3(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x3(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32_x3(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64_x3(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x3(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x3(__p0) __extension__ ({ \ + uint16x8x3_t 
__ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16_x3(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8_x3(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8_x3(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32_x3(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32_x3(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32_x3(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32_x3(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64_x3(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x3(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x3(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16_x3(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8_x3(__p0) 
__extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8_x3(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32_x3(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32_x3(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_u64_x3(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16_x3(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16_x3(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8_x3(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8_x3(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32_x3(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32_x3(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32_x3(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32_x3(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_s64_x3(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + 
__builtin_neon_vld1_x3_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16_x3(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16_x3(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8_x4(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld1_p8_x4(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p16_x4(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld1_p16_x4(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p8_x4(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x4(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x4(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld1q_p16_x4(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u8_x4(__p0) __extension__ ({ \ + 
uint8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x4(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x4(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld1q_u32_x4(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u64_x4(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x4(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x4(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld1q_u16_x4(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s8_x4(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8_x4(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld1q_f32_x4(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld1q_f32_x4(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s32_x4(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld1q_s32_x4(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s64_x4(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x4(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x4(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld1q_s16_x4(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u8_x4(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld1_u8_x4(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_u32_x4(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld1_u32_x4(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + 
__builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_u64_x4(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_u16_x4(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld1_u16_x4(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s8_x4(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld1_s8_x4(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f32_x4(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld1_f32_x4(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_s32_x4(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld1_s32_x4(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_s64_x4(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1_s16_x4(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld1_s16_x4(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld2_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld2_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld2q_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld2q_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld2q_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld2q_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define 
vld2q_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld2q_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld2q_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld2q_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld2q_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld2_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld2_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else 
+#define vld2_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld2_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld2_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld2_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_s64(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld2_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld2_dup_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld2_dup_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld2q_dup_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + 
__builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld2q_dup_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld2q_dup_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld2q_dup_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld2q_dup_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld2q_dup_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld2q_dup_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f32(__p0) __extension__ ({ \ + float32x4x2_t 
__ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld2q_dup_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld2q_dup_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld2q_dup_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld2q_dup_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld2_dup_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld2_dup_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_dup_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld2_dup_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s8(__p0) __extension__ ({ \ + 
int8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld2_dup_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld2_dup_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld2_dup_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_dup_s64(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld2_dup_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __ret; \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ + __ret; \ +}) +#else +#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __ret; \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __ret; \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ + __ret; \ +}) +#else +#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __ret; \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 
__p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __ret; \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ + __ret; \ +}) +#else +#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __ret; \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __ret; \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ + __ret; \ +}) +#else +#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __ret; \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __ret; \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ + __ret; \ +}) +#else +#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __ret; \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __ret; \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ + __ret; \ +}) +#else +#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __ret; \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + 
__builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __ret; \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ + __ret; \ +}) +#else +#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __ret; \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __ret; \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ + __ret; \ +}) +#else +#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __ret; \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __ret; \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ + __ret; \ +}) +#else +#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __ret; \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __ret; \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ + __ret; \ +}) +#else +#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __ret; \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __ret; \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ + __ret; \ +}) +#else +#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __ret; \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __ret; \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ + __ret; \ +}) +#else +#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __ret; \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __ret; \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ + __ret; \ +}) +#else +#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __ret; \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __ret; \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ + __ret; \ +}) +#else +#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __ret; \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, 
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __ret; \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ + __ret; \ +}) +#else +#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __ret; \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld3q_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 
1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld3_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + \ + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_dup_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + 
__builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld3_dup_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_dup_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __ret; \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ + __ret; \ +}) +#else +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __ret; \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ 
+ \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __ret; \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ + __ret; \ +}) +#else +#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __ret; \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __ret; \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ + __ret; \ +}) +#else +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __ret; \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __ret; \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ + __ret; \ +}) +#else +#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __ret; \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 
0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __ret; \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ + __ret; \ +}) +#else +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __ret; \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __ret; \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ + __ret; \ +}) +#else +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __ret; \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __ret; \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ + __ret; \ +}) +#else +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __ret; \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s16(__p0, __p1, __p2) 
__extension__ ({ \ + int16x8x3_t __ret; \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ + __ret; \ +}) +#else +#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __ret; \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __ret; \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ + __ret; \ +}) +#else +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __ret; \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __ret; \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ + __ret; \ +}) +#else +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __ret; \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __ret; \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 
__p2, 17); \ + __ret; \ +}) +#else +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __ret; \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __ret; \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ + __ret; \ +}) +#else +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __ret; \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __ret; \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ + __ret; \ +}) +#else +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __ret; \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __ret; \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ + __ret; \ +}) +#else +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __ret; \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __ret; \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ + __ret; \ +}) +#else +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __ret; \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + __ret; \ +}) +#else +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_u64(__p0) 
__extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) 
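Note: the generated vld2/vld3/vld4 families above (including the _lane and _dup forms) all follow one pattern: the little-endian branch forwards straight to the __builtin_neon_* call, while the big-endian branch reverses lane order with __builtin_shufflevector before and after the call so that lane numbering matches the architectural element order. The sketch below is illustrative only and is not part of the patched header; it assumes an ARM/AArch64 NEON target where this header is reachable as <arm_neon.h>, and the names rgb, r, g, b, and px are hypothetical.

/* Illustrative sketch: de-interleave 8 RGB triplets with vld3_u8. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  unsigned char rgb[24];              /* 8 interleaved R,G,B triplets */
  for (int i = 0; i < 24; ++i) rgb[i] = (unsigned char)i;

  /* vld3_u8 performs a de-interleaving load: val[0] receives every first
     byte (R), val[1] every second (G), val[2] every third (B). The
     big-endian branch of the macro only fixes up lane order afterwards;
     the element values are identical on either endianness. */
  uint8x8x3_t px = vld3_u8(rgb);

  unsigned char r[8], g[8], b[8];
  vst1_u8(r, px.val[0]);
  vst1_u8(g, px.val[1]);
  vst1_u8(b, px.val[2]);

  printf("%u %u %u\n", r[0], g[0], b[0]);   /* prints: 0 1 2 */
  return 0;
}

The struct-of-arrays result (three .val vectors) is the point of the x3 variants: one load splits interleaved records into per-component vectors, and the _dup and _lane forms reuse the same shuffle fix-up for broadcast and single-lane loads respectively.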
+#else +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) +#else +#define vld4_dup_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) +#else +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) +#else +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) +#else +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) +#else +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t 
__ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) +#else +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) +#else +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) +#else +#define vld4_dup_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) +#else +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_dup_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) +#else +#define 
vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) +#else +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) +#else +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) +#else +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_dup_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) +#else +#define vld4_dup_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __ret; \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ + __ret; \ +}) +#else +#define vld4_lane_p8(__p0, __p1, __p2) 
__extension__ ({ \ + poly8x8x4_t __ret; \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __ret; \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ + __ret; \ +}) +#else +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __ret; \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __ret; \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ + __ret; \ +}) +#else +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __ret; \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __ret; \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ + __ret; \ +}) +#else +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __ret; \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __ret; \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ + __ret; \ +}) +#else +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __ret; \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __ret; \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ + __ret; \ +}) +#else +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __ret; \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + 
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __ret; \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ + __ret; \ +}) +#else +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __ret; \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __ret; \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ + __ret; \ +}) +#else +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __ret; \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __ret; \ + uint8x8x4_t __s1 = __p1; \ + 
__builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ + __ret; \ +}) +#else +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __ret; \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __ret; \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ + __ret; \ +}) +#else +#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __ret; \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __ret; \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ + __ret; \ +}) +#else +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __ret; \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __ret; \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ + __ret; \ +}) +#else +#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __ret; \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __ret; \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ + __ret; \ +}) +#else +#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __ret; \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __ret; \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ + __ret; \ +}) +#else +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __ret; \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], 
__s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __ret; \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ + __ret; \ +}) +#else +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __ret; \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + 
uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 
0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) 
__builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t 
vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; 
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_u32(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ + uint32x4_t __ret_54; \ + uint32x4_t __s0_54 = __p0_54; \ + uint32x4_t __s1_54 = __p1_54; \ + uint32x2_t __s2_54 = __p2_54; \ + __ret_54 = __s0_54 + __s1_54 * splatq_lane_u32(__s2_54, __p3_54); \ + __ret_54; \ +}) +#else +#define vmlaq_lane_u32(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ + uint32x4_t __ret_55; \ + uint32x4_t __s0_55 = __p0_55; \ + uint32x4_t __s1_55 = __p1_55; \ + uint32x2_t __s2_55 = __p2_55; \ + uint32x4_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \ + uint32x4_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \ + uint32x2_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 1, 0); \ + __ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_u32(__rev2_55, __p3_55); \ + __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \ + __ret_55; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_u16(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ + uint16x8_t __ret_56; \ + uint16x8_t __s0_56 = __p0_56; \ + uint16x8_t __s1_56 = __p1_56; \ + uint16x4_t __s2_56 = __p2_56; \ + __ret_56 = __s0_56 + __s1_56 * splatq_lane_u16(__s2_56, __p3_56); \ + __ret_56; \ +}) +#else +#define vmlaq_lane_u16(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ + uint16x8_t __ret_57; \ + uint16x8_t __s0_57 = __p0_57; \ + uint16x8_t __s1_57 = __p1_57; \ + uint16x4_t __s2_57 = __p2_57; \ + uint16x8_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 3, 2, 1, 0); \ + __ret_57 = __rev0_57 + __rev1_57 * __noswap_splatq_lane_u16(__rev2_57, __p3_57); \ + __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_57; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_f32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ + float32x4_t __ret_58; \ + float32x4_t __s0_58 = __p0_58; \ + float32x4_t __s1_58 = __p1_58; \ + float32x2_t __s2_58 = __p2_58; \ + __ret_58 = __s0_58 + __s1_58 * splatq_lane_f32(__s2_58, __p3_58); \ + __ret_58; \ +}) +#else +#define vmlaq_lane_f32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ + float32x4_t __ret_59; \ + float32x4_t __s0_59 = __p0_59; \ + float32x4_t __s1_59 = __p1_59; \ + float32x2_t __s2_59 = __p2_59; \ + float32x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \ + float32x4_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 3, 2, 1, 0); \ + float32x2_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \ + __ret_59 = __rev0_59 + __rev1_59 * __noswap_splatq_lane_f32(__rev2_59, __p3_59); \ + __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \ + __ret_59; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ + int32x4_t __ret_60; \ + int32x4_t __s0_60 = __p0_60; \ + 
int32x4_t __s1_60 = __p1_60; \ + int32x2_t __s2_60 = __p2_60; \ + __ret_60 = __s0_60 + __s1_60 * splatq_lane_s32(__s2_60, __p3_60); \ + __ret_60; \ +}) +#else +#define vmlaq_lane_s32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ + int32x4_t __ret_61; \ + int32x4_t __s0_61 = __p0_61; \ + int32x4_t __s1_61 = __p1_61; \ + int32x2_t __s2_61 = __p2_61; \ + int32x4_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \ + int32x4_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \ + int32x2_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 1, 0); \ + __ret_61 = __rev0_61 + __rev1_61 * __noswap_splatq_lane_s32(__rev2_61, __p3_61); \ + __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \ + __ret_61; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s16(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ + int16x8_t __ret_62; \ + int16x8_t __s0_62 = __p0_62; \ + int16x8_t __s1_62 = __p1_62; \ + int16x4_t __s2_62 = __p2_62; \ + __ret_62 = __s0_62 + __s1_62 * splatq_lane_s16(__s2_62, __p3_62); \ + __ret_62; \ +}) +#else +#define vmlaq_lane_s16(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ + int16x8_t __ret_63; \ + int16x8_t __s0_63 = __p0_63; \ + int16x8_t __s1_63 = __p1_63; \ + int16x4_t __s2_63 = __p2_63; \ + int16x8_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 3, 2, 1, 0); \ + __ret_63 = __rev0_63 + __rev1_63 * __noswap_splatq_lane_s16(__rev2_63, __p3_63); \ + __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_63; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u32(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ + uint32x2_t __ret_64; \ + uint32x2_t __s0_64 = __p0_64; \ + uint32x2_t __s1_64 = __p1_64; \ + uint32x2_t __s2_64 = __p2_64; \ + __ret_64 = __s0_64 + __s1_64 * splat_lane_u32(__s2_64, __p3_64); \ + __ret_64; \ +}) +#else +#define vmla_lane_u32(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ + uint32x2_t __ret_65; \ + uint32x2_t __s0_65 = __p0_65; \ + uint32x2_t __s1_65 = __p1_65; \ + uint32x2_t __s2_65 = __p2_65; \ + uint32x2_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 1, 0); \ + uint32x2_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 1, 0); \ + uint32x2_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 1, 0); \ + __ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_u32(__rev2_65, __p3_65); \ + __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 1, 0); \ + __ret_65; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u16(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ + uint16x4_t __ret_66; \ + uint16x4_t __s0_66 = __p0_66; \ + uint16x4_t __s1_66 = __p1_66; \ + uint16x4_t __s2_66 = __p2_66; \ + __ret_66 = __s0_66 + __s1_66 * splat_lane_u16(__s2_66, __p3_66); \ + __ret_66; \ +}) +#else +#define vmla_lane_u16(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ + uint16x4_t __ret_67; \ + uint16x4_t __s0_67 = __p0_67; \ + uint16x4_t __s1_67 = __p1_67; \ + uint16x4_t __s2_67 = __p2_67; \ + uint16x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ + uint16x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ + uint16x4_t __rev2_67; 
__rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 3, 2, 1, 0); \ + __ret_67 = __rev0_67 + __rev1_67 * __noswap_splat_lane_u16(__rev2_67, __p3_67); \ + __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ + __ret_67; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_f32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ + float32x2_t __ret_68; \ + float32x2_t __s0_68 = __p0_68; \ + float32x2_t __s1_68 = __p1_68; \ + float32x2_t __s2_68 = __p2_68; \ + __ret_68 = __s0_68 + __s1_68 * splat_lane_f32(__s2_68, __p3_68); \ + __ret_68; \ +}) +#else +#define vmla_lane_f32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ + float32x2_t __ret_69; \ + float32x2_t __s0_69 = __p0_69; \ + float32x2_t __s1_69 = __p1_69; \ + float32x2_t __s2_69 = __p2_69; \ + float32x2_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 1, 0); \ + float32x2_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 1, 0); \ + float32x2_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \ + __ret_69 = __rev0_69 + __rev1_69 * __noswap_splat_lane_f32(__rev2_69, __p3_69); \ + __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 1, 0); \ + __ret_69; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ + int32x2_t __ret_70; \ + int32x2_t __s0_70 = __p0_70; \ + int32x2_t __s1_70 = __p1_70; \ + int32x2_t __s2_70 = __p2_70; \ + __ret_70 = __s0_70 + __s1_70 * splat_lane_s32(__s2_70, __p3_70); \ + __ret_70; \ +}) +#else +#define vmla_lane_s32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ + int32x2_t __ret_71; \ + int32x2_t __s0_71 = __p0_71; \ + int32x2_t __s1_71 = __p1_71; \ + int32x2_t __s2_71 = __p2_71; \ + int32x2_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 1, 0); \ + int32x2_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 1, 0); \ + int32x2_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 1, 0); \ + __ret_71 = __rev0_71 + __rev1_71 * __noswap_splat_lane_s32(__rev2_71, __p3_71); \ + __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 1, 0); \ + __ret_71; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s16(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ + int16x4_t __ret_72; \ + int16x4_t __s0_72 = __p0_72; \ + int16x4_t __s1_72 = __p1_72; \ + int16x4_t __s2_72 = __p2_72; \ + __ret_72 = __s0_72 + __s1_72 * splat_lane_s16(__s2_72, __p3_72); \ + __ret_72; \ +}) +#else +#define vmla_lane_s16(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ + int16x4_t __ret_73; \ + int16x4_t __s0_73 = __p0_73; \ + int16x4_t __s1_73 = __p1_73; \ + int16x4_t __s2_73 = __p2_73; \ + int16x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \ + int16x4_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 3, 2, 1, 0); \ + int16x4_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 3, 2, 1, 0); \ + __ret_73 = __rev0_73 + __rev1_73 * __noswap_splat_lane_s16(__rev2_73, __p3_73); \ + __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \ + __ret_73; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + 
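+/*
+ * Illustrative note (annotation, not from the generated header): the _lane
+ * and _n variants broadcast the multiplier before accumulating, e.g.
+ *
+ *     r = vmlaq_lane_u32(a, b, c, 1);   // r[i] = a[i] + b[i] * c[1]
+ *     r = vmlaq_n_u32(a, b, s);         // r[i] = a[i] + b[i] * s
+ *
+ * The _lane forms go through splatq_lane_* (on big-endian, the __noswap_
+ * variant applied to the lane-reversed copy), while the _n forms build the
+ * broadcast with a vector initializer such as (uint32x4_t){s, s, s, s}.
+ */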
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vmla_n_u32(uint32x2_t __p0, 
uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - 
__p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret 
= __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_u32(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ + uint32x4_t __ret_74; \ + uint32x4_t __s0_74 = __p0_74; \ + uint32x4_t __s1_74 = __p1_74; \ + uint32x2_t __s2_74 = __p2_74; \ + __ret_74 = __s0_74 - __s1_74 * splatq_lane_u32(__s2_74, __p3_74); \ + __ret_74; \ +}) +#else +#define vmlsq_lane_u32(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ + uint32x4_t __ret_75; \ + uint32x4_t __s0_75 = __p0_75; \ + uint32x4_t __s1_75 = __p1_75; \ + uint32x2_t __s2_75 = __p2_75; \ + uint32x4_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \ + uint32x4_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \ + uint32x2_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 1, 0); \ + __ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_u32(__rev2_75, __p3_75); \ + __ret_75 = 
__builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \ + __ret_75; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_u16(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ + uint16x8_t __ret_76; \ + uint16x8_t __s0_76 = __p0_76; \ + uint16x8_t __s1_76 = __p1_76; \ + uint16x4_t __s2_76 = __p2_76; \ + __ret_76 = __s0_76 - __s1_76 * splatq_lane_u16(__s2_76, __p3_76); \ + __ret_76; \ +}) +#else +#define vmlsq_lane_u16(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ + uint16x8_t __ret_77; \ + uint16x8_t __s0_77 = __p0_77; \ + uint16x8_t __s1_77 = __p1_77; \ + uint16x4_t __s2_77 = __p2_77; \ + uint16x8_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 3, 2, 1, 0); \ + __ret_77 = __rev0_77 - __rev1_77 * __noswap_splatq_lane_u16(__rev2_77, __p3_77); \ + __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_77; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_f32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ + float32x4_t __ret_78; \ + float32x4_t __s0_78 = __p0_78; \ + float32x4_t __s1_78 = __p1_78; \ + float32x2_t __s2_78 = __p2_78; \ + __ret_78 = __s0_78 - __s1_78 * splatq_lane_f32(__s2_78, __p3_78); \ + __ret_78; \ +}) +#else +#define vmlsq_lane_f32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ + float32x4_t __ret_79; \ + float32x4_t __s0_79 = __p0_79; \ + float32x4_t __s1_79 = __p1_79; \ + float32x2_t __s2_79 = __p2_79; \ + float32x4_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \ + float32x4_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 3, 2, 1, 0); \ + float32x2_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \ + __ret_79 = __rev0_79 - __rev1_79 * __noswap_splatq_lane_f32(__rev2_79, __p3_79); \ + __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \ + __ret_79; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_s32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ + int32x4_t __ret_80; \ + int32x4_t __s0_80 = __p0_80; \ + int32x4_t __s1_80 = __p1_80; \ + int32x2_t __s2_80 = __p2_80; \ + __ret_80 = __s0_80 - __s1_80 * splatq_lane_s32(__s2_80, __p3_80); \ + __ret_80; \ +}) +#else +#define vmlsq_lane_s32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ + int32x4_t __ret_81; \ + int32x4_t __s0_81 = __p0_81; \ + int32x4_t __s1_81 = __p1_81; \ + int32x2_t __s2_81 = __p2_81; \ + int32x4_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \ + int32x4_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \ + int32x2_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 1, 0); \ + __ret_81 = __rev0_81 - __rev1_81 * __noswap_splatq_lane_s32(__rev2_81, __p3_81); \ + __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \ + __ret_81; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_s16(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \ + int16x8_t __ret_82; \ + int16x8_t __s0_82 = __p0_82; \ + int16x8_t __s1_82 = __p1_82; \ + int16x4_t __s2_82 = __p2_82; \ + __ret_82 = __s0_82 - __s1_82 * splatq_lane_s16(__s2_82, __p3_82); \ + __ret_82; \ +}) +#else +#define vmlsq_lane_s16(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \ + int16x8_t __ret_83; \ + int16x8_t __s0_83 = 
__p0_83; \ + int16x8_t __s1_83 = __p1_83; \ + int16x4_t __s2_83 = __p2_83; \ + int16x8_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \ + __ret_83 = __rev0_83 - __rev1_83 * __noswap_splatq_lane_s16(__rev2_83, __p3_83); \ + __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_83; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_u32(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \ + uint32x2_t __ret_84; \ + uint32x2_t __s0_84 = __p0_84; \ + uint32x2_t __s1_84 = __p1_84; \ + uint32x2_t __s2_84 = __p2_84; \ + __ret_84 = __s0_84 - __s1_84 * splat_lane_u32(__s2_84, __p3_84); \ + __ret_84; \ +}) +#else +#define vmls_lane_u32(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \ + uint32x2_t __ret_85; \ + uint32x2_t __s0_85 = __p0_85; \ + uint32x2_t __s1_85 = __p1_85; \ + uint32x2_t __s2_85 = __p2_85; \ + uint32x2_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 1, 0); \ + uint32x2_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 1, 0); \ + uint32x2_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \ + __ret_85 = __rev0_85 - __rev1_85 * __noswap_splat_lane_u32(__rev2_85, __p3_85); \ + __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 1, 0); \ + __ret_85; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_u16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \ + uint16x4_t __ret_86; \ + uint16x4_t __s0_86 = __p0_86; \ + uint16x4_t __s1_86 = __p1_86; \ + uint16x4_t __s2_86 = __p2_86; \ + __ret_86 = __s0_86 - __s1_86 * splat_lane_u16(__s2_86, __p3_86); \ + __ret_86; \ +}) +#else +#define vmls_lane_u16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \ + uint16x4_t __ret_87; \ + uint16x4_t __s0_87 = __p0_87; \ + uint16x4_t __s1_87 = __p1_87; \ + uint16x4_t __s2_87 = __p2_87; \ + uint16x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ + uint16x4_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 3, 2, 1, 0); \ + uint16x4_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 3, 2, 1, 0); \ + __ret_87 = __rev0_87 - __rev1_87 * __noswap_splat_lane_u16(__rev2_87, __p3_87); \ + __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ + __ret_87; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_f32(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \ + float32x2_t __ret_88; \ + float32x2_t __s0_88 = __p0_88; \ + float32x2_t __s1_88 = __p1_88; \ + float32x2_t __s2_88 = __p2_88; \ + __ret_88 = __s0_88 - __s1_88 * splat_lane_f32(__s2_88, __p3_88); \ + __ret_88; \ +}) +#else +#define vmls_lane_f32(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \ + float32x2_t __ret_89; \ + float32x2_t __s0_89 = __p0_89; \ + float32x2_t __s1_89 = __p1_89; \ + float32x2_t __s2_89 = __p2_89; \ + float32x2_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 1, 0); \ + float32x2_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \ + float32x2_t __rev2_89; __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 1, 0); \ + __ret_89 = __rev0_89 - __rev1_89 * __noswap_splat_lane_f32(__rev2_89, __p3_89); \ + __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 1, 0); \ + __ret_89; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vmls_lane_s32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \ + int32x2_t __ret_90; \ + int32x2_t __s0_90 = __p0_90; \ + int32x2_t __s1_90 = __p1_90; \ + int32x2_t __s2_90 = __p2_90; \ + __ret_90 = __s0_90 - __s1_90 * splat_lane_s32(__s2_90, __p3_90); \ + __ret_90; \ +}) +#else +#define vmls_lane_s32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \ + int32x2_t __ret_91; \ + int32x2_t __s0_91 = __p0_91; \ + int32x2_t __s1_91 = __p1_91; \ + int32x2_t __s2_91 = __p2_91; \ + int32x2_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 1, 0); \ + int32x2_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 1, 0); \ + int32x2_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 1, 0); \ + __ret_91 = __rev0_91 - __rev1_91 * __noswap_splat_lane_s32(__rev2_91, __p3_91); \ + __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 1, 0); \ + __ret_91; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s16(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \ + int16x4_t __ret_92; \ + int16x4_t __s0_92 = __p0_92; \ + int16x4_t __s1_92 = __p1_92; \ + int16x4_t __s2_92 = __p2_92; \ + __ret_92 = __s0_92 - __s1_92 * splat_lane_s16(__s2_92, __p3_92); \ + __ret_92; \ +}) +#else +#define vmls_lane_s16(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \ + int16x4_t __ret_93; \ + int16x4_t __s0_93 = __p0_93; \ + int16x4_t __s1_93 = __p1_93; \ + int16x4_t __s2_93 = __p2_93; \ + int16x4_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \ + int16x4_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 3, 2, 1, 0); \ + int16x4_t __rev2_93; __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 3, 2, 1, 0); \ + __ret_93 = __rev0_93 - __rev1_93 * __noswap_splat_lane_s16(__rev2_93, __p3_93); \ + __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \ + __ret_93; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = __p0 
- __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2}; + return __ret; +} 
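+/*
+ * Illustrative note (annotation, not from the generated header): vmls*,
+ * vmls*_lane and vmls*_n mirror the vmla forms with subtraction, e.g.
+ *
+ *     float32x2_t r = vmls_n_f32(a, b, s);   // r[i] = a[i] - b[i] * s
+ *
+ * The endian handling is identical: reverse the operand lanes, subtract the
+ * product, and reverse the result back.
+ */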
+#else +__ai __attribute__((target("neon"))) float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t 
vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + 
__ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmov_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vmov_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmov_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vmov_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vmov_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmov_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmov_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vmov_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vmov_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmov_n_f32(float32_t __p0) { + float32x2_t 
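+/*
+ * Illustrative note (annotation, not from the generated header): vmov_n_* and
+ * vmovq_n_* duplicate one scalar into every lane, e.g.
+ *
+ *     uint8x16_t v = vmovq_n_u8(0x2a);   // v[i] = 0x2a for all 16 lanes
+ *
+ * The single-lane 64-bit forms (vmov_n_u64, vmov_n_s64) have nothing to
+ * reverse, so they are defined once without an endian branch.
+ */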
__ret; + __ret = (float32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmov_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmov_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmov_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmov_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmov_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vmov_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmov_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmov_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); + return 
__ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_u32(__p0_94, __p1_94, __p2_94) __extension__ ({ \ + uint32x4_t __ret_94; \ + uint32x4_t __s0_94 = __p0_94; \ + uint32x2_t __s1_94 = __p1_94; \ + __ret_94 = __s0_94 * splatq_lane_u32(__s1_94, __p2_94); \ + __ret_94; \ +}) +#else +#define vmulq_lane_u32(__p0_95, __p1_95, __p2_95) __extension__ ({ \ + uint32x4_t __ret_95; \ + uint32x4_t __s0_95 = __p0_95; \ + uint32x2_t __s1_95 = __p1_95; \ + uint32x4_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \ + uint32x2_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 1, 0); \ + __ret_95 = __rev0_95 * __noswap_splatq_lane_u32(__rev1_95, __p2_95); \ + __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \ + 
__ret_95; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_u16(__p0_96, __p1_96, __p2_96) __extension__ ({ \ + uint16x8_t __ret_96; \ + uint16x8_t __s0_96 = __p0_96; \ + uint16x4_t __s1_96 = __p1_96; \ + __ret_96 = __s0_96 * splatq_lane_u16(__s1_96, __p2_96); \ + __ret_96; \ +}) +#else +#define vmulq_lane_u16(__p0_97, __p1_97, __p2_97) __extension__ ({ \ + uint16x8_t __ret_97; \ + uint16x8_t __s0_97 = __p0_97; \ + uint16x4_t __s1_97 = __p1_97; \ + uint16x8_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 3, 2, 1, 0); \ + __ret_97 = __rev0_97 * __noswap_splatq_lane_u16(__rev1_97, __p2_97); \ + __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_97; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f32(__p0_98, __p1_98, __p2_98) __extension__ ({ \ + float32x4_t __ret_98; \ + float32x4_t __s0_98 = __p0_98; \ + float32x2_t __s1_98 = __p1_98; \ + __ret_98 = __s0_98 * splatq_lane_f32(__s1_98, __p2_98); \ + __ret_98; \ +}) +#else +#define vmulq_lane_f32(__p0_99, __p1_99, __p2_99) __extension__ ({ \ + float32x4_t __ret_99; \ + float32x4_t __s0_99 = __p0_99; \ + float32x2_t __s1_99 = __p1_99; \ + float32x4_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \ + float32x2_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \ + __ret_99 = __rev0_99 * __noswap_splatq_lane_f32(__rev1_99, __p2_99); \ + __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \ + __ret_99; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_s32(__p0_100, __p1_100, __p2_100) __extension__ ({ \ + int32x4_t __ret_100; \ + int32x4_t __s0_100 = __p0_100; \ + int32x2_t __s1_100 = __p1_100; \ + __ret_100 = __s0_100 * splatq_lane_s32(__s1_100, __p2_100); \ + __ret_100; \ +}) +#else +#define vmulq_lane_s32(__p0_101, __p1_101, __p2_101) __extension__ ({ \ + int32x4_t __ret_101; \ + int32x4_t __s0_101 = __p0_101; \ + int32x2_t __s1_101 = __p1_101; \ + int32x4_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \ + int32x2_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 1, 0); \ + __ret_101 = __rev0_101 * __noswap_splatq_lane_s32(__rev1_101, __p2_101); \ + __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \ + __ret_101; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_s16(__p0_102, __p1_102, __p2_102) __extension__ ({ \ + int16x8_t __ret_102; \ + int16x8_t __s0_102 = __p0_102; \ + int16x4_t __s1_102 = __p1_102; \ + __ret_102 = __s0_102 * splatq_lane_s16(__s1_102, __p2_102); \ + __ret_102; \ +}) +#else +#define vmulq_lane_s16(__p0_103, __p1_103, __p2_103) __extension__ ({ \ + int16x8_t __ret_103; \ + int16x8_t __s0_103 = __p0_103; \ + int16x4_t __s1_103 = __p1_103; \ + int16x8_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 3, 2, 1, 0); \ + __ret_103 = __rev0_103 * __noswap_splatq_lane_s16(__rev1_103, __p2_103); \ + __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_103; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_u32(__p0_104, __p1_104, __p2_104) __extension__ ({ \ + uint32x2_t __ret_104; \ + uint32x2_t __s0_104 = __p0_104; \ + uint32x2_t __s1_104 = __p1_104; \ + __ret_104 = __s0_104 * 
splat_lane_u32(__s1_104, __p2_104); \ + __ret_104; \ +}) +#else +#define vmul_lane_u32(__p0_105, __p1_105, __p2_105) __extension__ ({ \ + uint32x2_t __ret_105; \ + uint32x2_t __s0_105 = __p0_105; \ + uint32x2_t __s1_105 = __p1_105; \ + uint32x2_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 1, 0); \ + uint32x2_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 1, 0); \ + __ret_105 = __rev0_105 * __noswap_splat_lane_u32(__rev1_105, __p2_105); \ + __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 1, 0); \ + __ret_105; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_u16(__p0_106, __p1_106, __p2_106) __extension__ ({ \ + uint16x4_t __ret_106; \ + uint16x4_t __s0_106 = __p0_106; \ + uint16x4_t __s1_106 = __p1_106; \ + __ret_106 = __s0_106 * splat_lane_u16(__s1_106, __p2_106); \ + __ret_106; \ +}) +#else +#define vmul_lane_u16(__p0_107, __p1_107, __p2_107) __extension__ ({ \ + uint16x4_t __ret_107; \ + uint16x4_t __s0_107 = __p0_107; \ + uint16x4_t __s1_107 = __p1_107; \ + uint16x4_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 3, 2, 1, 0); \ + uint16x4_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 3, 2, 1, 0); \ + __ret_107 = __rev0_107 * __noswap_splat_lane_u16(__rev1_107, __p2_107); \ + __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 3, 2, 1, 0); \ + __ret_107; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f32(__p0_108, __p1_108, __p2_108) __extension__ ({ \ + float32x2_t __ret_108; \ + float32x2_t __s0_108 = __p0_108; \ + float32x2_t __s1_108 = __p1_108; \ + __ret_108 = __s0_108 * splat_lane_f32(__s1_108, __p2_108); \ + __ret_108; \ +}) +#else +#define vmul_lane_f32(__p0_109, __p1_109, __p2_109) __extension__ ({ \ + float32x2_t __ret_109; \ + float32x2_t __s0_109 = __p0_109; \ + float32x2_t __s1_109 = __p1_109; \ + float32x2_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 1, 0); \ + float32x2_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 1, 0); \ + __ret_109 = __rev0_109 * __noswap_splat_lane_f32(__rev1_109, __p2_109); \ + __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 1, 0); \ + __ret_109; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \ + int32x2_t __ret_110; \ + int32x2_t __s0_110 = __p0_110; \ + int32x2_t __s1_110 = __p1_110; \ + __ret_110 = __s0_110 * splat_lane_s32(__s1_110, __p2_110); \ + __ret_110; \ +}) +#else +#define vmul_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \ + int32x2_t __ret_111; \ + int32x2_t __s0_111 = __p0_111; \ + int32x2_t __s1_111 = __p1_111; \ + int32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ + int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ + __ret_111 = __rev0_111 * __noswap_splat_lane_s32(__rev1_111, __p2_111); \ + __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ + __ret_111; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \ + int16x4_t __ret_112; \ + int16x4_t __s0_112 = __p0_112; \ + int16x4_t __s1_112 = __p1_112; \ + __ret_112 = __s0_112 * splat_lane_s16(__s1_112, __p2_112); \ + __ret_112; \ +}) +#else +#define vmul_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \ + int16x4_t __ret_113; \ + int16x4_t __s0_113 = __p0_113; \ + int16x4_t __s1_113 = __p1_113; \ + int16x4_t __rev0_113; __rev0_113 = 
__builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ + int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ + __ret_113 = __rev0_113 * __noswap_splat_lane_s16(__rev1_113, __p2_113); \ + __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \ + __ret_113; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __ret; + __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __ret; + __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __ret; + __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __ret; + __ret = __p0 * (uint32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t 
vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (uint32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __ret; + __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __ret; + __ret = __p0 * (float32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (float32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = __p0 * (int32x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (int32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) 
__builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_u32(__p0_114, __p1_114, __p2_114) __extension__ ({ \ + uint64x2_t __ret_114; \ + uint32x2_t __s0_114 = __p0_114; \ + uint32x2_t __s1_114 = __p1_114; \ + __ret_114 = vmull_u32(__s0_114, splat_lane_u32(__s1_114, __p2_114)); \ + __ret_114; \ +}) +#else +#define vmull_lane_u32(__p0_115, __p1_115, __p2_115) __extension__ ({ \ + uint64x2_t __ret_115; \ + uint32x2_t __s0_115 = __p0_115; \ + uint32x2_t __s1_115 = __p1_115; \ + uint32x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ + uint32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ + __ret_115 = __noswap_vmull_u32(__rev0_115, __noswap_splat_lane_u32(__rev1_115, __p2_115)); \ + __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ + __ret_115; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_u16(__p0_116, __p1_116, __p2_116) __extension__ ({ \ + uint32x4_t __ret_116; \ + uint16x4_t __s0_116 = __p0_116; \ + uint16x4_t __s1_116 = __p1_116; \ + __ret_116 = vmull_u16(__s0_116, splat_lane_u16(__s1_116, __p2_116)); \ + __ret_116; \ +}) +#else +#define vmull_lane_u16(__p0_117, __p1_117, __p2_117) __extension__ ({ \ + uint32x4_t __ret_117; \ + uint16x4_t __s0_117 = __p0_117; \ + uint16x4_t __s1_117 = __p1_117; \ + uint16x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ + uint16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ + __ret_117 = __noswap_vmull_u16(__rev0_117, __noswap_splat_lane_u16(__rev1_117, __p2_117)); \ + __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ + __ret_117; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \ + int64x2_t __ret_118; \ + int32x2_t __s0_118 = __p0_118; \ + int32x2_t __s1_118 = __p1_118; \ + __ret_118 = vmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \ + __ret_118; \ +}) +#else +#define vmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \ + int64x2_t __ret_119; \ + int32x2_t __s0_119 = __p0_119; \ + int32x2_t __s1_119 = __p1_119; \ + int32x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ + int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \ + __ret_119 = __noswap_vmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \ + __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \ + __ret_119; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \ + int32x4_t __ret_120; \ + int16x4_t __s0_120 = __p0_120; \ + int16x4_t __s1_120 = __p1_120; \ + __ret_120 = vmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \ + __ret_120; \ +}) +#else +#define vmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \ + int32x4_t __ret_121; \ + int16x4_t __s0_121 = __p0_121; \ + int16x4_t __s1_121 = __p1_121; \ + int16x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ + int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \ + __ret_121 = __noswap_vmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \ + __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \ + __ret_121; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; 
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmvnq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmvnq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 
1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmvnq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmvnq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmvnq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmvnq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = ~__p0; + return __ret; +} +#else 
+__ai __attribute__((target("neon"))) int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) int32x2_t vneg_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vneg_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t 
vornq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + 
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = 
__p0 | __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) 
__builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint64x1_t __ret; + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __ret; + 
int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { + int64x1_t __ret; + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, 
(int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vpaddlq_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vpaddlq_u8(uint8x16_t __p0) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) 
uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vpaddlq_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vpaddlq_s8(int8x16_t __p0) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vpaddlq_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vpaddlq_s32(int32x4_t __p0) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vpaddlq_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vpaddlq_s16(int16x8_t __p0) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint16x4_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19); + return 
__ret; +} +#else +__ai __attribute__((target("neon"))) uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint64x1_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint32x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vpaddl_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vpaddl_s8(int8x8_t __p0) { + int16x4_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x1_t vpaddl_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x1_t vpaddl_s32(int32x2_t __p0) { + int64x1_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vpaddl_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vpaddl_s16(int16x4_t __p0) { + int32x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) 
__builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __ret; 
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_lane_s32(__p0_122, __p1_122, __p2_122, __p3_122) __extension__ ({ \ + int64x2_t __ret_122; \ + int64x2_t __s0_122 = __p0_122; \ + int32x2_t __s1_122 = 
__p1_122; \ + int32x2_t __s2_122 = __p2_122; \ + __ret_122 = vqdmlal_s32(__s0_122, __s1_122, splat_lane_s32(__s2_122, __p3_122)); \ + __ret_122; \ +}) +#else +#define vqdmlal_lane_s32(__p0_123, __p1_123, __p2_123, __p3_123) __extension__ ({ \ + int64x2_t __ret_123; \ + int64x2_t __s0_123 = __p0_123; \ + int32x2_t __s1_123 = __p1_123; \ + int32x2_t __s2_123 = __p2_123; \ + int64x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \ + int32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \ + int32x2_t __rev2_123; __rev2_123 = __builtin_shufflevector(__s2_123, __s2_123, 1, 0); \ + __ret_123 = __noswap_vqdmlal_s32(__rev0_123, __rev1_123, __noswap_splat_lane_s32(__rev2_123, __p3_123)); \ + __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \ + __ret_123; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_lane_s16(__p0_124, __p1_124, __p2_124, __p3_124) __extension__ ({ \ + int32x4_t __ret_124; \ + int32x4_t __s0_124 = __p0_124; \ + int16x4_t __s1_124 = __p1_124; \ + int16x4_t __s2_124 = __p2_124; \ + __ret_124 = vqdmlal_s16(__s0_124, __s1_124, splat_lane_s16(__s2_124, __p3_124)); \ + __ret_124; \ +}) +#else +#define vqdmlal_lane_s16(__p0_125, __p1_125, __p2_125, __p3_125) __extension__ ({ \ + int32x4_t __ret_125; \ + int32x4_t __s0_125 = __p0_125; \ + int16x4_t __s1_125 = __p1_125; \ + int16x4_t __s2_125 = __p2_125; \ + int32x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ + int16x4_t __rev1_125; __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, 3, 2, 1, 0); \ + int16x4_t __rev2_125; __rev2_125 = __builtin_shufflevector(__s2_125, __s2_125, 3, 2, 1, 0); \ + __ret_125 = __noswap_vqdmlal_s16(__rev0_125, __rev1_125, __noswap_splat_lane_s16(__rev2_125, __p3_125)); \ + __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ + __ret_125; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai 
__attribute__((target("neon"))) int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_lane_s32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \ + int64x2_t __ret_126; \ + int64x2_t __s0_126 = __p0_126; \ + int32x2_t __s1_126 = __p1_126; \ + int32x2_t __s2_126 = __p2_126; \ + __ret_126 = vqdmlsl_s32(__s0_126, __s1_126, splat_lane_s32(__s2_126, __p3_126)); \ + __ret_126; \ +}) +#else +#define vqdmlsl_lane_s32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \ + int64x2_t __ret_127; \ + int64x2_t __s0_127 = __p0_127; \ + int32x2_t __s1_127 = __p1_127; \ + int32x2_t __s2_127 = __p2_127; \ + int64x2_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 1, 0); \ + int32x2_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \ + int32x2_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, 1, 0); \ + __ret_127 = __noswap_vqdmlsl_s32(__rev0_127, __rev1_127, __noswap_splat_lane_s32(__rev2_127, __p3_127)); \ + __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 1, 0); \ + __ret_127; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_lane_s16(__p0_128, __p1_128, __p2_128, __p3_128) 
__extension__ ({ \ + int32x4_t __ret_128; \ + int32x4_t __s0_128 = __p0_128; \ + int16x4_t __s1_128 = __p1_128; \ + int16x4_t __s2_128 = __p2_128; \ + __ret_128 = vqdmlsl_s16(__s0_128, __s1_128, splat_lane_s16(__s2_128, __p3_128)); \ + __ret_128; \ +}) +#else +#define vqdmlsl_lane_s16(__p0_129, __p1_129, __p2_129, __p3_129) __extension__ ({ \ + int32x4_t __ret_129; \ + int32x4_t __s0_129 = __p0_129; \ + int16x4_t __s1_129 = __p1_129; \ + int16x4_t __s2_129 = __p2_129; \ + int32x4_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 3, 2, 1, 0); \ + int16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \ + int16x4_t __rev2_129; __rev2_129 = __builtin_shufflevector(__s2_129, __s2_129, 3, 2, 1, 0); \ + __ret_129 = __noswap_vqdmlsl_s16(__rev0_129, __rev1_129, __noswap_splat_lane_s16(__rev2_129, __p3_129)); \ + __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 3, 2, 1, 0); \ + __ret_129; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai 
__attribute__((target("neon"))) int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t 
__noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \ + int64x2_t __ret_130; \ + int32x2_t __s0_130 = __p0_130; \ + int32x2_t __s1_130 = __p1_130; \ + __ret_130 = vqdmull_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \ + __ret_130; \ +}) +#else +#define vqdmull_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \ + int64x2_t __ret_131; \ + int32x2_t __s0_131 = __p0_131; \ + int32x2_t __s1_131 = __p1_131; \ + int32x2_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \ + int32x2_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \ + __ret_131 = __noswap_vqdmull_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \ + __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \ + __ret_131; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \ + int32x4_t __ret_132; \ + int16x4_t __s0_132 = __p0_132; \ + int16x4_t __s1_132 = __p1_132; \ + __ret_132 = vqdmull_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \ + __ret_132; \ +}) +#else +#define vqdmull_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \ + int32x4_t __ret_133; \ + int16x4_t __s0_133 = __p0_133; \ + int16x4_t __s1_133 = __p1_133; \ + int16x4_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \ + int16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ + __ret_133 = __noswap_vqdmull_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \ + __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \ + __ret_133; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vqmovn_u32(uint32x4_t 
__p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = 
(int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
(int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 
0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) 
__builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t 
__ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); 
\ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 
1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \ + 
__ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) 
__builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) 
__builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = 
(uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = 
(uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) 
__builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) 
__builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} 
+#else +__ai __attribute__((target("neon"))) poly8x16_t vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 
7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vrev32q_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vrev32q_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrev32_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vrev32_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vrev32_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vrev32_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrev32_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vrev32_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vrev32_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vrev32_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 
1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vrev64q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vrev64q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t 
vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vrev64_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vrev64_u32(uint32x2_t __p0) { 
+ uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret 
= __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) 
__builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t 
vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t 
__s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + 
uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) 
__builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrsqrteq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrsqrteq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vrsqrte_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vrsqrte_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrsqrte_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrsqrte_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) 
__builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + 
__ret; \ +}) +#else +#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ 
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 
18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 
4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, 
(poly16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32_t __s0 = __p0; 
\ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + 
uint32x2_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 
2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 
4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint16x8_t) 
__builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s32(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshll_n_s32(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) +#else +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + 
int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; 
\ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#else +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#else +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#else +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 
5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = 
__p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = 
__p1; \ + __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) 
__builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t 
__rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = 
__p1; \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, 
__p2, 1); \ + __ret; \ +}) +#else +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ +}) +#else +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ +}) +#else +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ +}) +#else +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ +}) +#else +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) 
__builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ +}) +#else +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ +}) +#else +#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ +}) +#else +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) 
__builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ +}) +#else +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ +}) +#else +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 
1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ +}) +#else +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ +}) +#else +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ +}) +#else +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t 
__s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \ +}) +#else +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \ +}) +#else +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \ +}) +#else +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \ +}) +#else +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \ +}) +#else +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \ +}) +#else +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \ +}) +#else +#define vst1q_u64(__p0, __p1) 
__extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \ +}) +#else +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \ +}) +#else +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \ +}) +#else +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \ +}) +#else +#define vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \ +}) +#else +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \ +}) +#else +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \ +}) +#else +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \ +}) +#else +#define vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \ +}) +#endif + +#define vst1_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 
19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \ +}) +#else +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \ +}) +#else +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \ +}) +#else +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \ +}) +#else +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \ +}) +#endif + +#define vst1_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \ +}) +#else +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ +}) +#else +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ +}) +#else +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ +}) +#else +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + 
poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ +}) +#else +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ +}) +#else +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ +}) +#else +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ +}) +#else +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ +}) +#else +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ +}) +#else +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ +}) +#else +#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ +}) +#else +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ +}) +#else +#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ +}) +#else +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ +}) +#else +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ +}) +#else +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ +}) +#endif + +#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ +}) +#else +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ +}) +#else +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ +}) +#else +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ +}) +#else +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ +}) +#endif + +#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ +}) +#else +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +}) +#else +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +}) +#else +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ +}) +#else +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ +}) +#else +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +}) +#else +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +}) +#else +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +}) +#else +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +}) +#else +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +}) +#else +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ +}) +#else +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ +}) +#else 
+#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ +}) +#else +#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ +}) +#else +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +}) +#else +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +}) +#else +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +}) +#endif + +#define vst1_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ +}) +#else +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x2(__p0, __p1) 
__extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +}) +#else +#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ +}) +#else +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ +}) +#else +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ +}) +#endif + +#define vst1_s64_x2(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ +}) +#else +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +}) +#else +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +}) +#else +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +}) +#else +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +}) +#else +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +}) +#else +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +}) +#else +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x3(__p0, 
__p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +}) +#else +#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +}) +#else +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ +}) +#else +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ +}) +#else +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ +}) +#else +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 
2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ +}) +#else +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ +}) +#else +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +}) +#else +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +}) +#else +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +}) +#endif + +#define vst1_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +}) +#else +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + 
uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +}) +#else +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ +}) +#else +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ +}) +#else +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ +}) +#endif + +#define vst1_s64_x3(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ +}) +#else +#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + 
__builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ +}) +#else +#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ +}) +#else +#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +}) +#else +#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ +}) +#else +#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +}) +#else +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ +}) +#else +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +}) +#else +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +}) +#else +#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 
(int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +}) +#else +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ +}) +#else +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ +}) +#else +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ +}) +#else +#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ +}) +#else +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +}) +#else +#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +}) +#else +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ +}) +#endif + +#define vst1_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +}) +#else +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +}) +#else +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ +}) +#else +#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ +}) +#else +#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ +}) +#endif + +#define vst1_s64_x4(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ +}) +#else +#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); 
\ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_p8(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +}) +#else +#define vst2_p8(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_p16(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +}) +#else +#define vst2_p16(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ +}) +#else +#define vst2q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ +}) +#else +#define vst2q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +}) +#else +#define vst2q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +}) +#else +#define vst2q_u32(__p0, __p1) 
__extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +}) +#else +#define vst2q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s8(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +}) +#else +#define vst2q_s8(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f32(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ +}) +#else +#define vst2q_f32(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s32(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ +}) +#else +#define vst2q_s32(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s16(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ +}) +#else +#define vst2q_s16(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_u8(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +}) +#else 
+#define vst2_u8(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_u32(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +}) +#else +#define vst2_u32(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +}) +#endif + +#define vst2_u64(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst2_u16(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ +}) +#else +#define vst2_u16(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_s8(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +}) +#else +#define vst2_s8(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_f32(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ +}) +#else +#define vst2_f32(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_s32(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ +}) +#else +#define vst2_s32(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ +}) +#endif + +#define vst2_s64(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ +}) +#ifdef 
__LITTLE_ENDIAN__ +#define vst2_s16(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ +}) +#else +#define vst2_s16(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ +}) +#else +#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ +}) +#else +#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ +}) +#else +#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ +}) +#else +#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ +}) +#else +#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ +}) +#else +#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ +}) +#else +#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ +}) +#else +#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ +}) +#else +#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ +}) +#else +#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ +}) +#else +#define 
vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ +}) +#else +#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ +}) +#else +#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ +}) +#else +#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ +}) +#else +#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_p8(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +}) +#else +#define vst3_p8(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +}) +#endif + 
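All of the big-endian (#else) variants above follow one pattern: each input vector's lanes are reversed with __builtin_shufflevector, the underlying __builtin_neon_* store is called on the reversed copies, and the trailing integer argument selects the element type for the builtin; on little-endian targets the arguments are forwarded unchanged. A minimal usage sketch of one of these store intrinsics, assuming a little-endian aarch64 target and only the standard <arm_neon.h> names (vld1_s16, vst2_s16):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
      /* two 4-lane vectors of 16-bit ints */
      int16_t a[4] = {0, 1, 2, 3};
      int16_t b[4] = {10, 11, 12, 13};
      int16x4x2_t pair;
      pair.val[0] = vld1_s16(a);
      pair.val[1] = vld1_s16(b);

      /* vst2_s16 stores the two registers interleaved:
         out = {0, 10, 1, 11, 2, 12, 3, 13} */
      int16_t out[8];
      vst2_s16(out, pair);

      for (int i = 0; i < 8; i++) printf("%d ", out[i]);
      printf("\n");
      return 0;
    }

vst2_s16 interleaves the two registers on the way out (a0, b0, a1, b1, ...), which is the inverse of what vld2_s16 does on load; on a big-endian target the macro above would transparently reverse the lanes before the store so the memory layout stays the same.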
+#ifdef __LITTLE_ENDIAN__ +#define vst3_p16(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +}) +#else +#define vst3_p16(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +}) +#else +#define vst3q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +}) +#else +#define vst3q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +}) +#else +#define vst3q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +}) +#else +#define vst3q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +}) +#else +#define vst3q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s8(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ +}) +#else +#define vst3q_s8(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f32(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ +}) +#else +#define vst3q_f32(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s32(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ +}) +#else +#define vst3q_s32(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s16(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ +}) +#else +#define vst3q_s16(__p0, __p1) __extension__ ({ \ 
+ int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_u8(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +}) +#else +#define vst3_u8(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_u32(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +}) +#else +#define vst3_u32(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +}) +#endif + +#define vst3_u64(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst3_u16(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +}) +#else +#define vst3_u16(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_s8(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +}) +#else +#define vst3_s8(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_f32(__p0, 
__p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ +}) +#else +#define vst3_f32(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_s32(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ +}) +#else +#define vst3_s32(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ +}) +#endif + +#define vst3_s64(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst3_s16(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ +}) +#else +#define vst3_s16(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ +}) +#else +#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ +}) +#else +#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], 
__s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ +}) +#else +#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ +}) +#else +#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ +}) +#else +#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ +}) +#else +#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ +}) +#else +#define vst3q_lane_s32(__p0, __p1, __p2) 
__extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ +}) +#else +#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ +}) +#else +#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ +}) +#else +#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ +}) +#else +#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s8(__p0, 
__p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ +}) +#else +#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ +}) +#else +#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ +}) +#else +#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ +}) +#else +#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_p8(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ +}) +#else +#define vst4_p8(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_p16(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ +}) +#else +#define vst4_p16(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +}) +#else +#define vst4q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ +}) +#else +#define vst4q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +}) +#else +#define vst4q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ +}) +#else +#define vst4q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +}) +#else +#define vst4q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s8(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +}) +#else +#define vst4q_s8(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f32(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ +}) +#else +#define vst4q_f32(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s32(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ +}) +#else +#define vst4q_s32(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s16(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ +}) +#else +#define vst4q_s16(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_u8(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +}) +#else +#define vst4_u8(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_u32(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +}) +#else +#define vst4_u32(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ +}) +#endif + +#define vst4_u64(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4_u16(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +}) +#else +#define vst4_u16(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_s8(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +}) +#else +#define vst4_s8(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_f32(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ +}) +#else +#define vst4_f32(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_s32(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ +}) +#else +#define vst4_s32(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ +}) +#endif + +#define vst4_s64(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4_s16(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ +}) +#else +#define vst4_s16(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ +}) +#else +#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ +}) +#else +#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ +}) +#else +#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ 
+ poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ +}) +#else +#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ +}) +#else +#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ +}) +#else +#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ +}) 
+#else +#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ +}) +#else +#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ +}) +#else +#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ +}) +#else +#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 
(int8x8_t)__s1.val[3], __p2, 17); \ +}) +#else +#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ +}) +#else +#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ +}) +#else +#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ +}) +#else +#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ +}) +#else +#define 
vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t 
vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = vmovl_u8(__p0) - vmovl_u8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = vmovl_u32(__p0) - vmovl_u32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = vmovl_u16(__p0) - vmovl_u16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret 
= __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = vmovl_s8(__p0) - vmovl_s8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = vmovl_s32(__p0) - vmovl_s32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = vmovl_s16(__p0) - vmovl_s16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 - vmovl_u8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 - vmovl_u32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 - vmovl_u16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vsubw_u16(uint32x4_t 
__p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = __p0 - vmovl_s8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = __p0 - vmovl_s32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = __p0 - vmovl_s16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 
3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtbl3_p8(poly8x8x3_t __p0, 
uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 
5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, 
uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 
5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { + 
uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + 
__builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + 
__builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) 
int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + uint16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + uint16x8_t __ret; + poly16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8x2_t __ret;
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4x2_t __ret;
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
+ return __ret;
+}
+#else
+__ai __attribute__((target("neon"))) poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4x2_t __ret;
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
+
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai __attribute__((target("neon"))) poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + 
return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, 
(int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, 
(int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + poly16x8_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2x2_t vzip_s32(int32x2_t __p0, 
int32x2_t __p1) { + int32x2x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) 
__builtin_neon_vqrdmlahq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, 
__ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s32(__p0_134, __p1_134, __p2_134, __p3_134) __extension__ ({ \ + int32x4_t __ret_134; \ + int32x4_t __s0_134 = __p0_134; \ + int32x4_t __s1_134 = __p1_134; \ + int32x2_t __s2_134 = __p2_134; \ + __ret_134 = vqrdmlahq_s32(__s0_134, __s1_134, splatq_lane_s32(__s2_134, __p3_134)); \ + __ret_134; \ +}) +#else +#define vqrdmlahq_lane_s32(__p0_135, __p1_135, __p2_135, __p3_135) __extension__ ({ \ + int32x4_t __ret_135; \ + int32x4_t __s0_135 = __p0_135; \ + int32x4_t __s1_135 = __p1_135; \ + int32x2_t __s2_135 = __p2_135; \ + int32x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \ + int32x4_t __rev1_135; __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 3, 2, 1, 0); \ + int32x2_t __rev2_135; __rev2_135 = __builtin_shufflevector(__s2_135, __s2_135, 1, 0); \ + __ret_135 = __noswap_vqrdmlahq_s32(__rev0_135, __rev1_135, __noswap_splatq_lane_s32(__rev2_135, __p3_135)); \ + __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \ + __ret_135; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s16(__p0_136, __p1_136, __p2_136, __p3_136) __extension__ ({ \ + int16x8_t __ret_136; \ + int16x8_t __s0_136 = __p0_136; \ + int16x8_t __s1_136 = __p1_136; \ + int16x4_t __s2_136 = __p2_136; \ + __ret_136 = vqrdmlahq_s16(__s0_136, __s1_136, splatq_lane_s16(__s2_136, __p3_136)); \ + __ret_136; \ +}) +#else +#define vqrdmlahq_lane_s16(__p0_137, __p1_137, __p2_137, __p3_137) __extension__ ({ \ + int16x8_t __ret_137; \ + int16x8_t __s0_137 = __p0_137; \ + int16x8_t __s1_137 = __p1_137; \ + int16x4_t __s2_137 = __p2_137; \ + int16x8_t __rev0_137; __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_137; __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_137; __rev2_137 = __builtin_shufflevector(__s2_137, __s2_137, 3, 2, 1, 0); \ + __ret_137 = __noswap_vqrdmlahq_s16(__rev0_137, __rev1_137, __noswap_splatq_lane_s16(__rev2_137, __p3_137)); \ + __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_137; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s32(__p0_138, __p1_138, __p2_138, __p3_138) __extension__ ({ \ + int32x2_t __ret_138; \ + int32x2_t __s0_138 = __p0_138; \ + int32x2_t __s1_138 = __p1_138; \ + int32x2_t __s2_138 = __p2_138; \ + __ret_138 = vqrdmlah_s32(__s0_138, __s1_138, splat_lane_s32(__s2_138, __p3_138)); \ + __ret_138; \ +}) +#else +#define vqrdmlah_lane_s32(__p0_139, __p1_139, __p2_139, __p3_139) __extension__ ({ \ + int32x2_t __ret_139; \ + int32x2_t __s0_139 = __p0_139; \ + int32x2_t __s1_139 = __p1_139; \ + int32x2_t __s2_139 = __p2_139; \ + int32x2_t __rev0_139; __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \ + int32x2_t __rev1_139; __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \ + int32x2_t __rev2_139; __rev2_139 = __builtin_shufflevector(__s2_139, __s2_139, 1, 0); \ + __ret_139 = __noswap_vqrdmlah_s32(__rev0_139, __rev1_139, __noswap_splat_lane_s32(__rev2_139, __p3_139)); \ + __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \ + __ret_139; \ +}) +#endif + 
+#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_lane_s16(__p0_140, __p1_140, __p2_140, __p3_140) __extension__ ({ \ + int16x4_t __ret_140; \ + int16x4_t __s0_140 = __p0_140; \ + int16x4_t __s1_140 = __p1_140; \ + int16x4_t __s2_140 = __p2_140; \ + __ret_140 = vqrdmlah_s16(__s0_140, __s1_140, splat_lane_s16(__s2_140, __p3_140)); \ + __ret_140; \ +}) +#else +#define vqrdmlah_lane_s16(__p0_141, __p1_141, __p2_141, __p3_141) __extension__ ({ \ + int16x4_t __ret_141; \ + int16x4_t __s0_141 = __p0_141; \ + int16x4_t __s1_141 = __p1_141; \ + int16x4_t __s2_141 = __p2_141; \ + int16x4_t __rev0_141; __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \ + int16x4_t __rev1_141; __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \ + int16x4_t __rev2_141; __rev2_141 = __builtin_shufflevector(__s2_141, __s2_141, 3, 2, 1, 0); \ + __ret_141 = __noswap_vqrdmlah_s16(__rev0_141, __rev1_141, __noswap_splat_lane_s16(__rev2_141, __p3_141)); \ + __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \ + __ret_141; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + 
int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#else +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \ + int32x4_t __ret_142; \ + int32x4_t __s0_142 = __p0_142; \ + int32x4_t __s1_142 = __p1_142; \ + int32x2_t __s2_142 = __p2_142; \ + __ret_142 = vqrdmlshq_s32(__s0_142, __s1_142, splatq_lane_s32(__s2_142, __p3_142)); \ + __ret_142; \ +}) +#else +#define vqrdmlshq_lane_s32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \ + int32x4_t __ret_143; \ + int32x4_t __s0_143 = __p0_143; \ + int32x4_t __s1_143 = __p1_143; \ + int32x2_t __s2_143 = __p2_143; \ + int32x4_t __rev0_143; __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \ + int32x4_t __rev1_143; __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 3, 2, 1, 0); \ + int32x2_t __rev2_143; __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 1, 0); \ + __ret_143 = __noswap_vqrdmlshq_s32(__rev0_143, __rev1_143, __noswap_splatq_lane_s32(__rev2_143, __p3_143)); \ + __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \ + __ret_143; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s16(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \ + int16x8_t __ret_144; \ + int16x8_t __s0_144 = __p0_144; \ + int16x8_t __s1_144 = __p1_144; \ + int16x4_t __s2_144 = __p2_144; \ + __ret_144 = vqrdmlshq_s16(__s0_144, __s1_144, splatq_lane_s16(__s2_144, __p3_144)); \ + __ret_144; \ +}) +#else +#define vqrdmlshq_lane_s16(__p0_145, 
__p1_145, __p2_145, __p3_145) __extension__ ({ \ + int16x8_t __ret_145; \ + int16x8_t __s0_145 = __p0_145; \ + int16x8_t __s1_145 = __p1_145; \ + int16x4_t __s2_145 = __p2_145; \ + int16x8_t __rev0_145; __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_145; __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \ + __ret_145 = __noswap_vqrdmlshq_s16(__rev0_145, __rev1_145, __noswap_splatq_lane_s16(__rev2_145, __p3_145)); \ + __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_145; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \ + int32x2_t __ret_146; \ + int32x2_t __s0_146 = __p0_146; \ + int32x2_t __s1_146 = __p1_146; \ + int32x2_t __s2_146 = __p2_146; \ + __ret_146 = vqrdmlsh_s32(__s0_146, __s1_146, splat_lane_s32(__s2_146, __p3_146)); \ + __ret_146; \ +}) +#else +#define vqrdmlsh_lane_s32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \ + int32x2_t __ret_147; \ + int32x2_t __s0_147 = __p0_147; \ + int32x2_t __s1_147 = __p1_147; \ + int32x2_t __s2_147 = __p2_147; \ + int32x2_t __rev0_147; __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 1, 0); \ + int32x2_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \ + int32x2_t __rev2_147; __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 1, 0); \ + __ret_147 = __noswap_vqrdmlsh_s32(__rev0_147, __rev1_147, __noswap_splat_lane_s32(__rev2_147, __p3_147)); \ + __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 1, 0); \ + __ret_147; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s16(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \ + int16x4_t __ret_148; \ + int16x4_t __s0_148 = __p0_148; \ + int16x4_t __s1_148 = __p1_148; \ + int16x4_t __s2_148 = __p2_148; \ + __ret_148 = vqrdmlsh_s16(__s0_148, __s1_148, splat_lane_s16(__s2_148, __p3_148)); \ + __ret_148; \ +}) +#else +#define vqrdmlsh_lane_s16(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \ + int16x4_t __ret_149; \ + int16x4_t __s0_149 = __p0_149; \ + int16x4_t __s1_149 = __p1_149; \ + int16x4_t __s2_149 = __p2_149; \ + int16x4_t __rev0_149; __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 3, 2, 1, 0); \ + int16x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \ + int16x4_t __rev2_149; __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 3, 2, 1, 0); \ + __ret_149 = __noswap_vqrdmlsh_s16(__rev0_149, __rev1_149, __noswap_splat_lane_s16(__rev2_149, __p3_149)); \ + __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 3, 2, 1, 0); \ + __ret_149; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, 
float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_lane_f16(__p0_150, __p1_150, __p2_150, __p3_150) __extension__ ({ \ + float16x4_t __ret_150; \ + float16x4_t __s0_150 = __p0_150; \ + float16x4_t __s1_150 = __p1_150; \ + float16x4_t __s2_150 = __p2_150; \ +float16x4_t __reint_150 = __s2_150; \ +uint32x2_t __reint1_150 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150), vget_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150)}; \ + __ret_150 = vcmla_f16(__s0_150, __s1_150, *(float16x4_t *) &__reint1_150); \ + __ret_150; \ +}) +#else +#define vcmla_lane_f16(__p0_151, __p1_151, __p2_151, __p3_151) __extension__ ({ \ + float16x4_t __ret_151; \ + float16x4_t __s0_151 = __p0_151; \ + float16x4_t __s1_151 = __p1_151; \ + float16x4_t __s2_151 = __p2_151; \ + float16x4_t __rev0_151; __rev0_151 = __builtin_shufflevector(__s0_151, __s0_151, 3, 2, 1, 0); \ + float16x4_t __rev1_151; __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 3, 2, 1, 0); \ + float16x4_t __rev2_151; __rev2_151 = __builtin_shufflevector(__s2_151, __s2_151, 3, 2, 1, 0); \ +float16x4_t __reint_151 = __rev2_151; \ +uint32x2_t __reint1_151 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_151, __p3_151), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_151, __p3_151)}; \ + __ret_151 = __noswap_vcmla_f16(__rev0_151, __rev1_151, *(float16x4_t *) &__reint1_151); \ + __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0); \ + __ret_151; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f16(__p0_152, __p1_152, __p2_152, __p3_152) __extension__ ({ \ + float16x8_t __ret_152; \ + float16x8_t __s0_152 = __p0_152; \ + float16x8_t __s1_152 = __p1_152; \ + float16x4_t __s2_152 = __p2_152; \ +float16x4_t __reint_152 = __s2_152; \ +uint32x4_t __reint1_152 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152), vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152), vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152), vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152)}; \ + __ret_152 = vcmlaq_f16(__s0_152, __s1_152, *(float16x8_t *) &__reint1_152); \ + __ret_152; \ +}) +#else +#define vcmlaq_lane_f16(__p0_153, __p1_153, __p2_153, __p3_153) 
__extension__ ({ \ + float16x8_t __ret_153; \ + float16x8_t __s0_153 = __p0_153; \ + float16x8_t __s1_153 = __p1_153; \ + float16x4_t __s2_153 = __p2_153; \ + float16x8_t __rev0_153; __rev0_153 = __builtin_shufflevector(__s0_153, __s0_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_153; __rev2_153 = __builtin_shufflevector(__s2_153, __s2_153, 3, 2, 1, 0); \ +float16x4_t __reint_153 = __rev2_153; \ +uint32x4_t __reint1_153 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153)}; \ + __ret_153 = __noswap_vcmlaq_f16(__rev0_153, __rev1_153, *(float16x8_t *) &__reint1_153); \ + __ret_153 = __builtin_shufflevector(__ret_153, __ret_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_153; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f16(__p0_154, __p1_154, __p2_154, __p3_154) __extension__ ({ \ + float16x4_t __ret_154; \ + float16x4_t __s0_154 = __p0_154; \ + float16x4_t __s1_154 = __p1_154; \ + float16x8_t __s2_154 = __p2_154; \ +float16x8_t __reint_154 = __s2_154; \ +uint32x2_t __reint1_154 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_154, __p3_154), vgetq_lane_u32(*(uint32x4_t *) &__reint_154, __p3_154)}; \ + __ret_154 = vcmla_f16(__s0_154, __s1_154, *(float16x4_t *) &__reint1_154); \ + __ret_154; \ +}) +#else +#define vcmla_laneq_f16(__p0_155, __p1_155, __p2_155, __p3_155) __extension__ ({ \ + float16x4_t __ret_155; \ + float16x4_t __s0_155 = __p0_155; \ + float16x4_t __s1_155 = __p1_155; \ + float16x8_t __s2_155 = __p2_155; \ + float16x4_t __rev0_155; __rev0_155 = __builtin_shufflevector(__s0_155, __s0_155, 3, 2, 1, 0); \ + float16x4_t __rev1_155; __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 3, 2, 1, 0); \ + float16x8_t __rev2_155; __rev2_155 = __builtin_shufflevector(__s2_155, __s2_155, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_155 = __rev2_155; \ +uint32x2_t __reint1_155 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_155, __p3_155), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_155, __p3_155)}; \ + __ret_155 = __noswap_vcmla_f16(__rev0_155, __rev1_155, *(float16x4_t *) &__reint1_155); \ + __ret_155 = __builtin_shufflevector(__ret_155, __ret_155, 3, 2, 1, 0); \ + __ret_155; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f16(__p0_156, __p1_156, __p2_156, __p3_156) __extension__ ({ \ + float16x8_t __ret_156; \ + float16x8_t __s0_156 = __p0_156; \ + float16x8_t __s1_156 = __p1_156; \ + float16x8_t __s2_156 = __p2_156; \ +float16x8_t __reint_156 = __s2_156; \ +uint32x4_t __reint1_156 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156), vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156), vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156), vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156)}; \ + __ret_156 = vcmlaq_f16(__s0_156, __s1_156, *(float16x8_t *) &__reint1_156); \ + __ret_156; \ +}) +#else +#define vcmlaq_laneq_f16(__p0_157, __p1_157, __p2_157, __p3_157) __extension__ ({ \ + float16x8_t __ret_157; \ + float16x8_t __s0_157 = __p0_157; \ + float16x8_t __s1_157 = __p1_157; \ + float16x8_t __s2_157 = __p2_157; \ + float16x8_t __rev0_157; __rev0_157 = __builtin_shufflevector(__s0_157, __s0_157, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_157; __rev1_157 = 
__builtin_shufflevector(__s1_157, __s1_157, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_157; __rev2_157 = __builtin_shufflevector(__s2_157, __s2_157, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_157 = __rev2_157; \ +uint32x4_t __reint1_157 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157)}; \ + __ret_157 = __noswap_vcmlaq_f16(__rev0_157, __rev1_157, *(float16x8_t *) &__reint1_157); \ + __ret_157 = __builtin_shufflevector(__ret_157, __ret_157, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_157; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_lane_f16(__p0_158, __p1_158, __p2_158, __p3_158) __extension__ ({ \ + float16x4_t __ret_158; \ + float16x4_t __s0_158 = __p0_158; \ + float16x4_t __s1_158 = __p1_158; \ + float16x4_t __s2_158 = __p2_158; \ +float16x4_t __reint_158 = __s2_158; \ +uint32x2_t __reint1_158 = 
(uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_158, __p3_158), vget_lane_u32(*(uint32x2_t *) &__reint_158, __p3_158)}; \ + __ret_158 = vcmla_rot180_f16(__s0_158, __s1_158, *(float16x4_t *) &__reint1_158); \ + __ret_158; \ +}) +#else +#define vcmla_rot180_lane_f16(__p0_159, __p1_159, __p2_159, __p3_159) __extension__ ({ \ + float16x4_t __ret_159; \ + float16x4_t __s0_159 = __p0_159; \ + float16x4_t __s1_159 = __p1_159; \ + float16x4_t __s2_159 = __p2_159; \ + float16x4_t __rev0_159; __rev0_159 = __builtin_shufflevector(__s0_159, __s0_159, 3, 2, 1, 0); \ + float16x4_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 3, 2, 1, 0); \ + float16x4_t __rev2_159; __rev2_159 = __builtin_shufflevector(__s2_159, __s2_159, 3, 2, 1, 0); \ +float16x4_t __reint_159 = __rev2_159; \ +uint32x2_t __reint1_159 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_159, __p3_159), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_159, __p3_159)}; \ + __ret_159 = __noswap_vcmla_rot180_f16(__rev0_159, __rev1_159, *(float16x4_t *) &__reint1_159); \ + __ret_159 = __builtin_shufflevector(__ret_159, __ret_159, 3, 2, 1, 0); \ + __ret_159; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f16(__p0_160, __p1_160, __p2_160, __p3_160) __extension__ ({ \ + float16x8_t __ret_160; \ + float16x8_t __s0_160 = __p0_160; \ + float16x8_t __s1_160 = __p1_160; \ + float16x4_t __s2_160 = __p2_160; \ +float16x4_t __reint_160 = __s2_160; \ +uint32x4_t __reint1_160 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160), vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160), vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160), vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160)}; \ + __ret_160 = vcmlaq_rot180_f16(__s0_160, __s1_160, *(float16x8_t *) &__reint1_160); \ + __ret_160; \ +}) +#else +#define vcmlaq_rot180_lane_f16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ + float16x8_t __ret_161; \ + float16x8_t __s0_161 = __p0_161; \ + float16x8_t __s1_161 = __p1_161; \ + float16x4_t __s2_161 = __p2_161; \ + float16x8_t __rev0_161; __rev0_161 = __builtin_shufflevector(__s0_161, __s0_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_161; __rev2_161 = __builtin_shufflevector(__s2_161, __s2_161, 3, 2, 1, 0); \ +float16x4_t __reint_161 = __rev2_161; \ +uint32x4_t __reint1_161 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161)}; \ + __ret_161 = __noswap_vcmlaq_rot180_f16(__rev0_161, __rev1_161, *(float16x8_t *) &__reint1_161); \ + __ret_161 = __builtin_shufflevector(__ret_161, __ret_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_161; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ + float16x4_t __ret_162; \ + float16x4_t __s0_162 = __p0_162; \ + float16x4_t __s1_162 = __p1_162; \ + float16x8_t __s2_162 = __p2_162; \ +float16x8_t __reint_162 = __s2_162; \ +uint32x2_t __reint1_162 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_162, __p3_162), vgetq_lane_u32(*(uint32x4_t *) &__reint_162, __p3_162)}; \ + __ret_162 = vcmla_rot180_f16(__s0_162, __s1_162, *(float16x4_t *) &__reint1_162); \ + __ret_162; \ +}) +#else +#define 
vcmla_rot180_laneq_f16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ + float16x4_t __ret_163; \ + float16x4_t __s0_163 = __p0_163; \ + float16x4_t __s1_163 = __p1_163; \ + float16x8_t __s2_163 = __p2_163; \ + float16x4_t __rev0_163; __rev0_163 = __builtin_shufflevector(__s0_163, __s0_163, 3, 2, 1, 0); \ + float16x4_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \ + float16x8_t __rev2_163; __rev2_163 = __builtin_shufflevector(__s2_163, __s2_163, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_163 = __rev2_163; \ +uint32x2_t __reint1_163 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_163, __p3_163), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_163, __p3_163)}; \ + __ret_163 = __noswap_vcmla_rot180_f16(__rev0_163, __rev1_163, *(float16x4_t *) &__reint1_163); \ + __ret_163 = __builtin_shufflevector(__ret_163, __ret_163, 3, 2, 1, 0); \ + __ret_163; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \ + float16x8_t __ret_164; \ + float16x8_t __s0_164 = __p0_164; \ + float16x8_t __s1_164 = __p1_164; \ + float16x8_t __s2_164 = __p2_164; \ +float16x8_t __reint_164 = __s2_164; \ +uint32x4_t __reint1_164 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164), vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164), vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164), vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164)}; \ + __ret_164 = vcmlaq_rot180_f16(__s0_164, __s1_164, *(float16x8_t *) &__reint1_164); \ + __ret_164; \ +}) +#else +#define vcmlaq_rot180_laneq_f16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ + float16x8_t __ret_165; \ + float16x8_t __s0_165 = __p0_165; \ + float16x8_t __s1_165 = __p1_165; \ + float16x8_t __s2_165 = __p2_165; \ + float16x8_t __rev0_165; __rev0_165 = __builtin_shufflevector(__s0_165, __s0_165, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_165; __rev2_165 = __builtin_shufflevector(__s2_165, __s2_165, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_165 = __rev2_165; \ +uint32x4_t __reint1_165 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165)}; \ + __ret_165 = __noswap_vcmlaq_rot180_f16(__rev0_165, __rev1_165, *(float16x8_t *) &__reint1_165); \ + __ret_165 = __builtin_shufflevector(__ret_165, __ret_165, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_165; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) 
__builtin_neon_vcmlaq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_lane_f16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ + float16x4_t __ret_166; \ + float16x4_t __s0_166 = __p0_166; \ + float16x4_t __s1_166 = __p1_166; \ + float16x4_t __s2_166 = __p2_166; \ +float16x4_t __reint_166 = __s2_166; \ +uint32x2_t __reint1_166 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_166, __p3_166), vget_lane_u32(*(uint32x2_t *) &__reint_166, __p3_166)}; \ + __ret_166 = vcmla_rot270_f16(__s0_166, __s1_166, *(float16x4_t *) &__reint1_166); \ + __ret_166; \ +}) +#else +#define vcmla_rot270_lane_f16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ + float16x4_t __ret_167; \ + float16x4_t __s0_167 = __p0_167; \ + float16x4_t __s1_167 = __p1_167; \ + float16x4_t __s2_167 = __p2_167; \ + float16x4_t __rev0_167; __rev0_167 = __builtin_shufflevector(__s0_167, __s0_167, 3, 2, 1, 0); \ + float16x4_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 3, 2, 1, 0); \ + float16x4_t __rev2_167; __rev2_167 = __builtin_shufflevector(__s2_167, __s2_167, 3, 2, 1, 0); \ +float16x4_t __reint_167 = __rev2_167; \ +uint32x2_t __reint1_167 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_167, __p3_167), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_167, __p3_167)}; \ + __ret_167 = __noswap_vcmla_rot270_f16(__rev0_167, __rev1_167, *(float16x4_t *) &__reint1_167); \ + __ret_167 = __builtin_shufflevector(__ret_167, __ret_167, 3, 2, 1, 0); \ + __ret_167; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ + float16x8_t __ret_168; \ + float16x8_t __s0_168 = __p0_168; \ + float16x8_t __s1_168 = __p1_168; \ + float16x4_t __s2_168 = __p2_168; \ +float16x4_t __reint_168 = __s2_168; \ +uint32x4_t __reint1_168 = (uint32x4_t) 
{vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168), vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168), vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168), vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168)}; \ + __ret_168 = vcmlaq_rot270_f16(__s0_168, __s1_168, *(float16x8_t *) &__reint1_168); \ + __ret_168; \ +}) +#else +#define vcmlaq_rot270_lane_f16(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \ + float16x8_t __ret_169; \ + float16x8_t __s0_169 = __p0_169; \ + float16x8_t __s1_169 = __p1_169; \ + float16x4_t __s2_169 = __p2_169; \ + float16x8_t __rev0_169; __rev0_169 = __builtin_shufflevector(__s0_169, __s0_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_169; __rev2_169 = __builtin_shufflevector(__s2_169, __s2_169, 3, 2, 1, 0); \ +float16x4_t __reint_169 = __rev2_169; \ +uint32x4_t __reint1_169 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169)}; \ + __ret_169 = __noswap_vcmlaq_rot270_f16(__rev0_169, __rev1_169, *(float16x8_t *) &__reint1_169); \ + __ret_169 = __builtin_shufflevector(__ret_169, __ret_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_169; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f16(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ + float16x4_t __ret_170; \ + float16x4_t __s0_170 = __p0_170; \ + float16x4_t __s1_170 = __p1_170; \ + float16x8_t __s2_170 = __p2_170; \ +float16x8_t __reint_170 = __s2_170; \ +uint32x2_t __reint1_170 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_170, __p3_170), vgetq_lane_u32(*(uint32x4_t *) &__reint_170, __p3_170)}; \ + __ret_170 = vcmla_rot270_f16(__s0_170, __s1_170, *(float16x4_t *) &__reint1_170); \ + __ret_170; \ +}) +#else +#define vcmla_rot270_laneq_f16(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ + float16x4_t __ret_171; \ + float16x4_t __s0_171 = __p0_171; \ + float16x4_t __s1_171 = __p1_171; \ + float16x8_t __s2_171 = __p2_171; \ + float16x4_t __rev0_171; __rev0_171 = __builtin_shufflevector(__s0_171, __s0_171, 3, 2, 1, 0); \ + float16x4_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \ + float16x8_t __rev2_171; __rev2_171 = __builtin_shufflevector(__s2_171, __s2_171, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_171 = __rev2_171; \ +uint32x2_t __reint1_171 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_171, __p3_171), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_171, __p3_171)}; \ + __ret_171 = __noswap_vcmla_rot270_f16(__rev0_171, __rev1_171, *(float16x4_t *) &__reint1_171); \ + __ret_171 = __builtin_shufflevector(__ret_171, __ret_171, 3, 2, 1, 0); \ + __ret_171; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f16(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ + float16x8_t __ret_172; \ + float16x8_t __s0_172 = __p0_172; \ + float16x8_t __s1_172 = __p1_172; \ + float16x8_t __s2_172 = __p2_172; \ +float16x8_t __reint_172 = __s2_172; \ +uint32x4_t __reint1_172 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172), vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172), vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172), vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172)}; \ + __ret_172 = 
vcmlaq_rot270_f16(__s0_172, __s1_172, *(float16x8_t *) &__reint1_172); \ + __ret_172; \ +}) +#else +#define vcmlaq_rot270_laneq_f16(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ + float16x8_t __ret_173; \ + float16x8_t __s0_173 = __p0_173; \ + float16x8_t __s1_173 = __p1_173; \ + float16x8_t __s2_173 = __p2_173; \ + float16x8_t __rev0_173; __rev0_173 = __builtin_shufflevector(__s0_173, __s0_173, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_173; __rev2_173 = __builtin_shufflevector(__s2_173, __s2_173, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_173 = __rev2_173; \ +uint32x4_t __reint1_173 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173)}; \ + __ret_173 = __noswap_vcmlaq_rot270_f16(__rev0_173, __rev1_173, *(float16x8_t *) &__reint1_173); \ + __ret_173 = __builtin_shufflevector(__ret_173, __ret_173, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_173; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + 
float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_lane_f16(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ + float16x4_t __ret_174; \ + float16x4_t __s0_174 = __p0_174; \ + float16x4_t __s1_174 = __p1_174; \ + float16x4_t __s2_174 = __p2_174; \ +float16x4_t __reint_174 = __s2_174; \ +uint32x2_t __reint1_174 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_174, __p3_174), vget_lane_u32(*(uint32x2_t *) &__reint_174, __p3_174)}; \ + __ret_174 = vcmla_rot90_f16(__s0_174, __s1_174, *(float16x4_t *) &__reint1_174); \ + __ret_174; \ +}) +#else +#define vcmla_rot90_lane_f16(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ + float16x4_t __ret_175; \ + float16x4_t __s0_175 = __p0_175; \ + float16x4_t __s1_175 = __p1_175; \ + float16x4_t __s2_175 = __p2_175; \ + float16x4_t __rev0_175; __rev0_175 = __builtin_shufflevector(__s0_175, __s0_175, 3, 2, 1, 0); \ + float16x4_t __rev1_175; __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 3, 2, 1, 0); \ + float16x4_t __rev2_175; __rev2_175 = __builtin_shufflevector(__s2_175, __s2_175, 3, 2, 1, 0); \ +float16x4_t __reint_175 = __rev2_175; \ +uint32x2_t __reint1_175 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_175, __p3_175), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_175, __p3_175)}; \ + __ret_175 = __noswap_vcmla_rot90_f16(__rev0_175, __rev1_175, *(float16x4_t *) &__reint1_175); \ + __ret_175 = __builtin_shufflevector(__ret_175, __ret_175, 3, 2, 1, 0); \ + __ret_175; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f16(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ + float16x8_t __ret_176; \ + float16x8_t __s0_176 = __p0_176; \ + float16x8_t __s1_176 = __p1_176; \ + float16x4_t __s2_176 = __p2_176; \ +float16x4_t __reint_176 = __s2_176; \ +uint32x4_t __reint1_176 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176), vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176), vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176), vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176)}; \ + __ret_176 = vcmlaq_rot90_f16(__s0_176, __s1_176, *(float16x8_t *) &__reint1_176); \ + __ret_176; \ +}) +#else +#define vcmlaq_rot90_lane_f16(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ + float16x8_t __ret_177; \ + float16x8_t __s0_177 = __p0_177; \ + float16x8_t __s1_177 = __p1_177; \ + float16x4_t __s2_177 = __p2_177; \ + float16x8_t __rev0_177; __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_177; __rev2_177 = __builtin_shufflevector(__s2_177, __s2_177, 3, 2, 1, 0); \ +float16x4_t __reint_177 = __rev2_177; \ +uint32x4_t __reint1_177 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177)}; \ + __ret_177 = __noswap_vcmlaq_rot90_f16(__rev0_177, __rev1_177, *(float16x8_t *) &__reint1_177); \ + __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_177; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f16(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ + float16x4_t 
__ret_178; \ + float16x4_t __s0_178 = __p0_178; \ + float16x4_t __s1_178 = __p1_178; \ + float16x8_t __s2_178 = __p2_178; \ +float16x8_t __reint_178 = __s2_178; \ +uint32x2_t __reint1_178 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_178, __p3_178), vgetq_lane_u32(*(uint32x4_t *) &__reint_178, __p3_178)}; \ + __ret_178 = vcmla_rot90_f16(__s0_178, __s1_178, *(float16x4_t *) &__reint1_178); \ + __ret_178; \ +}) +#else +#define vcmla_rot90_laneq_f16(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ + float16x4_t __ret_179; \ + float16x4_t __s0_179 = __p0_179; \ + float16x4_t __s1_179 = __p1_179; \ + float16x8_t __s2_179 = __p2_179; \ + float16x4_t __rev0_179; __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 3, 2, 1, 0); \ + float16x4_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 3, 2, 1, 0); \ + float16x8_t __rev2_179; __rev2_179 = __builtin_shufflevector(__s2_179, __s2_179, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_179 = __rev2_179; \ +uint32x2_t __reint1_179 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_179, __p3_179), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_179, __p3_179)}; \ + __ret_179 = __noswap_vcmla_rot90_f16(__rev0_179, __rev1_179, *(float16x4_t *) &__reint1_179); \ + __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 3, 2, 1, 0); \ + __ret_179; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_laneq_f16(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ + float16x8_t __ret_180; \ + float16x8_t __s0_180 = __p0_180; \ + float16x8_t __s1_180 = __p1_180; \ + float16x8_t __s2_180 = __p2_180; \ +float16x8_t __reint_180 = __s2_180; \ +uint32x4_t __reint1_180 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180), vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180), vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180), vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180)}; \ + __ret_180 = vcmlaq_rot90_f16(__s0_180, __s1_180, *(float16x8_t *) &__reint1_180); \ + __ret_180; \ +}) +#else +#define vcmlaq_rot90_laneq_f16(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ + float16x8_t __ret_181; \ + float16x8_t __s0_181 = __p0_181; \ + float16x8_t __s1_181 = __p1_181; \ + float16x8_t __s2_181 = __p2_181; \ + float16x8_t __rev0_181; __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_181; __rev2_181 = __builtin_shufflevector(__s2_181, __s2_181, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_181 = __rev2_181; \ +uint32x4_t __reint1_181 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181)}; \ + __ret_181 = __noswap_vcmlaq_rot90_f16(__rev0_181, __rev1_181, *(float16x8_t *) &__reint1_181); \ + __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_181; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t 
__p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x4_t 
__noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_lane_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ + float32x2_t __ret_182; \ + float32x2_t __s0_182 = __p0_182; \ + float32x2_t __s1_182 = __p1_182; \ + float32x2_t __s2_182 = __p2_182; \ +float32x2_t __reint_182 = __s2_182; \ +uint64x1_t __reint1_182 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_182, __p3_182)}; \ + __ret_182 = vcmla_f32(__s0_182, __s1_182, *(float32x2_t *) &__reint1_182); \ + __ret_182; \ +}) +#else +#define vcmla_lane_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ + float32x2_t __ret_183; \ + float32x2_t __s0_183 = __p0_183; \ + float32x2_t __s1_183 = __p1_183; \ + float32x2_t __s2_183 = __p2_183; \ + float32x2_t __rev0_183; __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 1, 0); \ + float32x2_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 1, 0); \ + float32x2_t __rev2_183; __rev2_183 = __builtin_shufflevector(__s2_183, __s2_183, 1, 0); \ +float32x2_t __reint_183 = __rev2_183; \ +uint64x1_t __reint1_183 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_183, __p3_183)}; \ + __ret_183 = __noswap_vcmla_f32(__rev0_183, __rev1_183, *(float32x2_t *) &__reint1_183); \ + __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 1, 0); \ + __ret_183; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ + float32x4_t __ret_184; \ + float32x4_t __s0_184 = __p0_184; \ + float32x4_t __s1_184 = __p1_184; \ + float32x2_t __s2_184 = __p2_184; \ +float32x2_t __reint_184 = __s2_184; \ +uint64x2_t __reint1_184 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_184, __p3_184), vget_lane_u64(*(uint64x1_t *) &__reint_184, __p3_184)}; \ + __ret_184 = vcmlaq_f32(__s0_184, __s1_184, *(float32x4_t *) &__reint1_184); \ + __ret_184; \ +}) +#else +#define vcmlaq_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ + float32x4_t __ret_185; \ + float32x4_t __s0_185 = __p0_185; \ + float32x4_t __s1_185 = __p1_185; \ + float32x2_t __s2_185 = __p2_185; \ + float32x4_t __rev0_185; __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 3, 2, 1, 0); \ + float32x4_t 
__rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 3, 2, 1, 0); \ + float32x2_t __rev2_185; __rev2_185 = __builtin_shufflevector(__s2_185, __s2_185, 1, 0); \ +float32x2_t __reint_185 = __rev2_185; \ +uint64x2_t __reint1_185 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185), vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ + __ret_185 = __noswap_vcmlaq_f32(__rev0_185, __rev1_185, *(float32x4_t *) &__reint1_185); \ + __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \ + __ret_185; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ + float32x2_t __ret_186; \ + float32x2_t __s0_186 = __p0_186; \ + float32x2_t __s1_186 = __p1_186; \ + float32x4_t __s2_186 = __p2_186; \ +float32x4_t __reint_186 = __s2_186; \ +uint64x1_t __reint1_186 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_186, __p3_186)}; \ + __ret_186 = vcmla_f32(__s0_186, __s1_186, *(float32x2_t *) &__reint1_186); \ + __ret_186; \ +}) +#else +#define vcmla_laneq_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ + float32x2_t __ret_187; \ + float32x2_t __s0_187 = __p0_187; \ + float32x2_t __s1_187 = __p1_187; \ + float32x4_t __s2_187 = __p2_187; \ + float32x2_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 1, 0); \ + float32x2_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 1, 0); \ + float32x4_t __rev2_187; __rev2_187 = __builtin_shufflevector(__s2_187, __s2_187, 3, 2, 1, 0); \ +float32x4_t __reint_187 = __rev2_187; \ +uint64x1_t __reint1_187 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_187, __p3_187)}; \ + __ret_187 = __noswap_vcmla_f32(__rev0_187, __rev1_187, *(float32x2_t *) &__reint1_187); \ + __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 1, 0); \ + __ret_187; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ + float32x4_t __ret_188; \ + float32x4_t __s0_188 = __p0_188; \ + float32x4_t __s1_188 = __p1_188; \ + float32x4_t __s2_188 = __p2_188; \ +float32x4_t __reint_188 = __s2_188; \ +uint64x2_t __reint1_188 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_188, __p3_188), vgetq_lane_u64(*(uint64x2_t *) &__reint_188, __p3_188)}; \ + __ret_188 = vcmlaq_f32(__s0_188, __s1_188, *(float32x4_t *) &__reint1_188); \ + __ret_188; \ +}) +#else +#define vcmlaq_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ + float32x4_t __ret_189; \ + float32x4_t __s0_189 = __p0_189; \ + float32x4_t __s1_189 = __p1_189; \ + float32x4_t __s2_189 = __p2_189; \ + float32x4_t __rev0_189; __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \ + float32x4_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \ + float32x4_t __rev2_189; __rev2_189 = __builtin_shufflevector(__s2_189, __s2_189, 3, 2, 1, 0); \ +float32x4_t __reint_189 = __rev2_189; \ +uint64x2_t __reint1_189 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ + __ret_189 = __noswap_vcmlaq_f32(__rev0_189, __rev1_189, *(float32x4_t *) &__reint1_189); \ + __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 3, 2, 1, 0); \ + __ret_189; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t 
__ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_lane_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ + float32x2_t __ret_190; \ + float32x2_t __s0_190 = __p0_190; \ + float32x2_t __s1_190 = __p1_190; \ + float32x2_t __s2_190 = __p2_190; \ +float32x2_t __reint_190 = __s2_190; \ +uint64x1_t __reint1_190 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_190, __p3_190)}; \ + __ret_190 = vcmla_rot180_f32(__s0_190, __s1_190, *(float32x2_t *) &__reint1_190); \ + __ret_190; \ +}) +#else +#define vcmla_rot180_lane_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ + float32x2_t __ret_191; \ + float32x2_t __s0_191 = __p0_191; \ + float32x2_t __s1_191 = __p1_191; \ + float32x2_t __s2_191 = __p2_191; \ + float32x2_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \ + float32x2_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \ + float32x2_t __rev2_191; __rev2_191 = __builtin_shufflevector(__s2_191, __s2_191, 1, 0); \ +float32x2_t __reint_191 = __rev2_191; \ +uint64x1_t __reint1_191 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_191, __p3_191)}; \ + __ret_191 = __noswap_vcmla_rot180_f32(__rev0_191, __rev1_191, *(float32x2_t *) &__reint1_191); \ + __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 1, 0); \ + __ret_191; \ +}) +#endif + 
+#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \ + float32x4_t __ret_192; \ + float32x4_t __s0_192 = __p0_192; \ + float32x4_t __s1_192 = __p1_192; \ + float32x2_t __s2_192 = __p2_192; \ +float32x2_t __reint_192 = __s2_192; \ +uint64x2_t __reint1_192 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_192, __p3_192), vget_lane_u64(*(uint64x1_t *) &__reint_192, __p3_192)}; \ + __ret_192 = vcmlaq_rot180_f32(__s0_192, __s1_192, *(float32x4_t *) &__reint1_192); \ + __ret_192; \ +}) +#else +#define vcmlaq_rot180_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \ + float32x4_t __ret_193; \ + float32x4_t __s0_193 = __p0_193; \ + float32x4_t __s1_193 = __p1_193; \ + float32x2_t __s2_193 = __p2_193; \ + float32x4_t __rev0_193; __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 3, 2, 1, 0); \ + float32x4_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 3, 2, 1, 0); \ + float32x2_t __rev2_193; __rev2_193 = __builtin_shufflevector(__s2_193, __s2_193, 1, 0); \ +float32x2_t __reint_193 = __rev2_193; \ +uint64x2_t __reint1_193 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193), vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ + __ret_193 = __noswap_vcmlaq_rot180_f32(__rev0_193, __rev1_193, *(float32x4_t *) &__reint1_193); \ + __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 3, 2, 1, 0); \ + __ret_193; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ + float32x2_t __ret_194; \ + float32x2_t __s0_194 = __p0_194; \ + float32x2_t __s1_194 = __p1_194; \ + float32x4_t __s2_194 = __p2_194; \ +float32x4_t __reint_194 = __s2_194; \ +uint64x1_t __reint1_194 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_194, __p3_194)}; \ + __ret_194 = vcmla_rot180_f32(__s0_194, __s1_194, *(float32x2_t *) &__reint1_194); \ + __ret_194; \ +}) +#else +#define vcmla_rot180_laneq_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \ + float32x2_t __ret_195; \ + float32x2_t __s0_195 = __p0_195; \ + float32x2_t __s1_195 = __p1_195; \ + float32x4_t __s2_195 = __p2_195; \ + float32x2_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 1, 0); \ + float32x2_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 1, 0); \ + float32x4_t __rev2_195; __rev2_195 = __builtin_shufflevector(__s2_195, __s2_195, 3, 2, 1, 0); \ +float32x4_t __reint_195 = __rev2_195; \ +uint64x1_t __reint1_195 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_195, __p3_195)}; \ + __ret_195 = __noswap_vcmla_rot180_f32(__rev0_195, __rev1_195, *(float32x2_t *) &__reint1_195); \ + __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 1, 0); \ + __ret_195; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ + float32x4_t __ret_196; \ + float32x4_t __s0_196 = __p0_196; \ + float32x4_t __s1_196 = __p1_196; \ + float32x4_t __s2_196 = __p2_196; \ +float32x4_t __reint_196 = __s2_196; \ +uint64x2_t __reint1_196 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_196, __p3_196), vgetq_lane_u64(*(uint64x2_t *) &__reint_196, __p3_196)}; \ + __ret_196 = vcmlaq_rot180_f32(__s0_196, __s1_196, *(float32x4_t *) &__reint1_196); \ + __ret_196; \ +}) +#else +#define vcmlaq_rot180_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \ + float32x4_t __ret_197; \ + float32x4_t __s0_197 = 
__p0_197; \ + float32x4_t __s1_197 = __p1_197; \ + float32x4_t __s2_197 = __p2_197; \ + float32x4_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 3, 2, 1, 0); \ + float32x4_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 3, 2, 1, 0); \ + float32x4_t __rev2_197; __rev2_197 = __builtin_shufflevector(__s2_197, __s2_197, 3, 2, 1, 0); \ +float32x4_t __reint_197 = __rev2_197; \ +uint64x2_t __reint1_197 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \ + __ret_197 = __noswap_vcmlaq_rot180_f32(__rev0_197, __rev1_197, *(float32x4_t *) &__reint1_197); \ + __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \ + __ret_197; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_lane_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \ + float32x2_t __ret_198; \ + float32x2_t __s0_198 = __p0_198; \ + float32x2_t __s1_198 = __p1_198; \ + float32x2_t __s2_198 = __p2_198; \ +float32x2_t __reint_198 = __s2_198; \ +uint64x1_t __reint1_198 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_198, 
__p3_198)}; \ + __ret_198 = vcmla_rot270_f32(__s0_198, __s1_198, *(float32x2_t *) &__reint1_198); \ + __ret_198; \ +}) +#else +#define vcmla_rot270_lane_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \ + float32x2_t __ret_199; \ + float32x2_t __s0_199 = __p0_199; \ + float32x2_t __s1_199 = __p1_199; \ + float32x2_t __s2_199 = __p2_199; \ + float32x2_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 1, 0); \ + float32x2_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 1, 0); \ + float32x2_t __rev2_199; __rev2_199 = __builtin_shufflevector(__s2_199, __s2_199, 1, 0); \ +float32x2_t __reint_199 = __rev2_199; \ +uint64x1_t __reint1_199 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_199, __p3_199)}; \ + __ret_199 = __noswap_vcmla_rot270_f32(__rev0_199, __rev1_199, *(float32x2_t *) &__reint1_199); \ + __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 1, 0); \ + __ret_199; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \ + float32x4_t __ret_200; \ + float32x4_t __s0_200 = __p0_200; \ + float32x4_t __s1_200 = __p1_200; \ + float32x2_t __s2_200 = __p2_200; \ +float32x2_t __reint_200 = __s2_200; \ +uint64x2_t __reint1_200 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_200, __p3_200), vget_lane_u64(*(uint64x1_t *) &__reint_200, __p3_200)}; \ + __ret_200 = vcmlaq_rot270_f32(__s0_200, __s1_200, *(float32x4_t *) &__reint1_200); \ + __ret_200; \ +}) +#else +#define vcmlaq_rot270_lane_f32(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \ + float32x4_t __ret_201; \ + float32x4_t __s0_201 = __p0_201; \ + float32x4_t __s1_201 = __p1_201; \ + float32x2_t __s2_201 = __p2_201; \ + float32x4_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \ + float32x4_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \ + float32x2_t __rev2_201; __rev2_201 = __builtin_shufflevector(__s2_201, __s2_201, 1, 0); \ +float32x2_t __reint_201 = __rev2_201; \ +uint64x2_t __reint1_201 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201), vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201)}; \ + __ret_201 = __noswap_vcmlaq_rot270_f32(__rev0_201, __rev1_201, *(float32x4_t *) &__reint1_201); \ + __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 3, 2, 1, 0); \ + __ret_201; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f32(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \ + float32x2_t __ret_202; \ + float32x2_t __s0_202 = __p0_202; \ + float32x2_t __s1_202 = __p1_202; \ + float32x4_t __s2_202 = __p2_202; \ +float32x4_t __reint_202 = __s2_202; \ +uint64x1_t __reint1_202 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_202, __p3_202)}; \ + __ret_202 = vcmla_rot270_f32(__s0_202, __s1_202, *(float32x2_t *) &__reint1_202); \ + __ret_202; \ +}) +#else +#define vcmla_rot270_laneq_f32(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \ + float32x2_t __ret_203; \ + float32x2_t __s0_203 = __p0_203; \ + float32x2_t __s1_203 = __p1_203; \ + float32x4_t __s2_203 = __p2_203; \ + float32x2_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \ + float32x2_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \ + float32x4_t __rev2_203; __rev2_203 = __builtin_shufflevector(__s2_203, __s2_203, 3, 2, 1, 0); \ +float32x4_t __reint_203 = __rev2_203; \ +uint64x1_t __reint1_203 = (uint64x1_t) 
{__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_203, __p3_203)}; \ + __ret_203 = __noswap_vcmla_rot270_f32(__rev0_203, __rev1_203, *(float32x2_t *) &__reint1_203); \ + __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 1, 0); \ + __ret_203; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f32(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \ + float32x4_t __ret_204; \ + float32x4_t __s0_204 = __p0_204; \ + float32x4_t __s1_204 = __p1_204; \ + float32x4_t __s2_204 = __p2_204; \ +float32x4_t __reint_204 = __s2_204; \ +uint64x2_t __reint1_204 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_204, __p3_204), vgetq_lane_u64(*(uint64x2_t *) &__reint_204, __p3_204)}; \ + __ret_204 = vcmlaq_rot270_f32(__s0_204, __s1_204, *(float32x4_t *) &__reint1_204); \ + __ret_204; \ +}) +#else +#define vcmlaq_rot270_laneq_f32(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \ + float32x4_t __ret_205; \ + float32x4_t __s0_205 = __p0_205; \ + float32x4_t __s1_205 = __p1_205; \ + float32x4_t __s2_205 = __p2_205; \ + float32x4_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 3, 2, 1, 0); \ + float32x4_t __rev1_205; __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 3, 2, 1, 0); \ + float32x4_t __rev2_205; __rev2_205 = __builtin_shufflevector(__s2_205, __s2_205, 3, 2, 1, 0); \ +float32x4_t __reint_205 = __rev2_205; \ +uint64x2_t __reint1_205 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205)}; \ + __ret_205 = __noswap_vcmlaq_rot270_f32(__rev0_205, __rev1_205, *(float32x4_t *) &__reint1_205); \ + __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 3, 2, 1, 0); \ + __ret_205; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_lane_f32(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \ + float32x2_t __ret_206; \ + float32x2_t __s0_206 = __p0_206; \ + float32x2_t __s1_206 = __p1_206; \ + float32x2_t __s2_206 = __p2_206; \ +float32x2_t __reint_206 = __s2_206; \ +uint64x1_t __reint1_206 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_206, __p3_206)}; \ + __ret_206 = vcmla_rot90_f32(__s0_206, __s1_206, *(float32x2_t *) &__reint1_206); \ + __ret_206; \ +}) +#else +#define vcmla_rot90_lane_f32(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \ + float32x2_t __ret_207; \ + float32x2_t __s0_207 = __p0_207; \ + float32x2_t __s1_207 = __p1_207; \ + float32x2_t __s2_207 = __p2_207; \ + float32x2_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 1, 0); \ + float32x2_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 1, 0); \ + float32x2_t __rev2_207; __rev2_207 = __builtin_shufflevector(__s2_207, __s2_207, 1, 0); \ +float32x2_t __reint_207 = __rev2_207; \ +uint64x1_t __reint1_207 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_207, __p3_207)}; \ + __ret_207 = __noswap_vcmla_rot90_f32(__rev0_207, __rev1_207, *(float32x2_t *) &__reint1_207); \ + __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 1, 0); \ + __ret_207; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f32(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \ + float32x4_t __ret_208; \ + float32x4_t __s0_208 = __p0_208; \ + float32x4_t __s1_208 = __p1_208; \ + float32x2_t __s2_208 = __p2_208; \ +float32x2_t __reint_208 = __s2_208; \ +uint64x2_t __reint1_208 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_208, __p3_208), vget_lane_u64(*(uint64x1_t *) &__reint_208, __p3_208)}; \ + __ret_208 = vcmlaq_rot90_f32(__s0_208, __s1_208, *(float32x4_t *) &__reint1_208); \ + __ret_208; \ +}) +#else +#define vcmlaq_rot90_lane_f32(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \ + float32x4_t __ret_209; \ + float32x4_t __s0_209 = __p0_209; \ + float32x4_t __s1_209 = __p1_209; \ + float32x2_t __s2_209 = __p2_209; \ + float32x4_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 3, 2, 1, 0); \ + float32x4_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 3, 2, 1, 0); \ + float32x2_t __rev2_209; __rev2_209 = __builtin_shufflevector(__s2_209, __s2_209, 1, 0); \ +float32x2_t __reint_209 = __rev2_209; \ +uint64x2_t __reint1_209 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_209, __p3_209), vget_lane_u64(*(uint64x1_t *) &__reint_209, __p3_209)}; \ + __ret_209 = __noswap_vcmlaq_rot90_f32(__rev0_209, __rev1_209, *(float32x4_t *) &__reint1_209); \ + __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \ + __ret_209; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f32(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \ + float32x2_t __ret_210; \ 
+ float32x2_t __s0_210 = __p0_210; \ + float32x2_t __s1_210 = __p1_210; \ + float32x4_t __s2_210 = __p2_210; \ +float32x4_t __reint_210 = __s2_210; \ +uint64x1_t __reint1_210 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_210, __p3_210)}; \ + __ret_210 = vcmla_rot90_f32(__s0_210, __s1_210, *(float32x2_t *) &__reint1_210); \ + __ret_210; \ +}) +#else +#define vcmla_rot90_laneq_f32(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \ + float32x2_t __ret_211; \ + float32x2_t __s0_211 = __p0_211; \ + float32x2_t __s1_211 = __p1_211; \ + float32x4_t __s2_211 = __p2_211; \ + float32x2_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 1, 0); \ + float32x2_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 1, 0); \ + float32x4_t __rev2_211; __rev2_211 = __builtin_shufflevector(__s2_211, __s2_211, 3, 2, 1, 0); \ +float32x4_t __reint_211 = __rev2_211; \ +uint64x1_t __reint1_211 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_211, __p3_211)}; \ + __ret_211 = __noswap_vcmla_rot90_f32(__rev0_211, __rev1_211, *(float32x2_t *) &__reint1_211); \ + __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 1, 0); \ + __ret_211; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_laneq_f32(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \ + float32x4_t __ret_212; \ + float32x4_t __s0_212 = __p0_212; \ + float32x4_t __s1_212 = __p1_212; \ + float32x4_t __s2_212 = __p2_212; \ +float32x4_t __reint_212 = __s2_212; \ +uint64x2_t __reint1_212 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_212, __p3_212), vgetq_lane_u64(*(uint64x2_t *) &__reint_212, __p3_212)}; \ + __ret_212 = vcmlaq_rot90_f32(__s0_212, __s1_212, *(float32x4_t *) &__reint1_212); \ + __ret_212; \ +}) +#else +#define vcmlaq_rot90_laneq_f32(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \ + float32x4_t __ret_213; \ + float32x4_t __s0_213 = __p0_213; \ + float32x4_t __s1_213 = __p1_213; \ + float32x4_t __s2_213 = __p2_213; \ + float32x4_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \ + float32x4_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \ + float32x4_t __rev2_213; __rev2_213 = __builtin_shufflevector(__s2_213, __s2_213, 3, 2, 1, 0); \ +float32x4_t __reint_213 = __rev2_213; \ +uint64x2_t __reint1_213 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_213, __p3_213), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_213, __p3_213)}; \ + __ret_213 = __noswap_vcmlaq_rot90_f32(__rev0_213, __rev1_213, *(float32x4_t *) &__reint1_213); \ + __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 3, 2, 1, 0); \ + __ret_213; \ +}) +#endif + +#if !defined(__aarch64__) && !defined(__arm64ec__) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__rev0, 11); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) 
__builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = __a32_vcvt_bf16_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap___a32_vcvt_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("bf16,neon"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("bf16,neon"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t 
vreinterpretq_bf16_u32(uint32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s32(__p0_214, __p1_214, __p2_214) __extension__ ({ \ + int32x4_t __ret_214; \ + int32x4_t __s0_214 = __p0_214; \ + int32x2_t __s1_214 = __p1_214; \ + __ret_214 = vqdmulhq_s32(__s0_214, splatq_lane_s32(__s1_214, __p2_214)); \ + __ret_214; \ +}) +#else +#define vqdmulhq_lane_s32(__p0_215, __p1_215, __p2_215) __extension__ ({ \ + int32x4_t __ret_215; \ + int32x4_t __s0_215 = __p0_215; \ + int32x2_t __s1_215 = __p1_215; \ + int32x4_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 3, 2, 1, 0); \ + int32x2_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \ + __ret_215 = __noswap_vqdmulhq_s32(__rev0_215, __noswap_splatq_lane_s32(__rev1_215, __p2_215)); \ + __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \ + __ret_215; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s16(__p0_216, __p1_216, __p2_216) __extension__ ({ \ + int16x8_t __ret_216; \ + int16x8_t __s0_216 = __p0_216; \ + int16x4_t __s1_216 = __p1_216; \ + __ret_216 = vqdmulhq_s16(__s0_216, splatq_lane_s16(__s1_216, __p2_216)); \ + __ret_216; \ +}) +#else +#define vqdmulhq_lane_s16(__p0_217, __p1_217, __p2_217) __extension__ ({ \ + int16x8_t __ret_217; \ + int16x8_t __s0_217 = __p0_217; \ + int16x4_t __s1_217 = __p1_217; \ + int16x8_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 3, 2, 1, 0); \ + __ret_217 = __noswap_vqdmulhq_s16(__rev0_217, __noswap_splatq_lane_s16(__rev1_217, __p2_217)); \ + __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_217; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s32(__p0_218, __p1_218, __p2_218) __extension__ ({ \ + int32x2_t __ret_218; \ + int32x2_t __s0_218 = __p0_218; \ + int32x2_t __s1_218 = __p1_218; \ + __ret_218 = vqdmulh_s32(__s0_218, splat_lane_s32(__s1_218, __p2_218)); \ + __ret_218; \ +}) +#else +#define vqdmulh_lane_s32(__p0_219, __p1_219, __p2_219) __extension__ ({ \ + int32x2_t __ret_219; \ + int32x2_t __s0_219 = __p0_219; \ + int32x2_t __s1_219 = __p1_219; \ + int32x2_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 1, 0); \ + int32x2_t __rev1_219; __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 1, 0); \ + __ret_219 = __noswap_vqdmulh_s32(__rev0_219, __noswap_splat_lane_s32(__rev1_219, __p2_219)); \ + __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 1, 0); \ + __ret_219; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s16(__p0_220, __p1_220, __p2_220) __extension__ ({ \ + int16x4_t __ret_220; \ + int16x4_t __s0_220 = __p0_220; \ + int16x4_t __s1_220 = __p1_220; \ + __ret_220 = vqdmulh_s16(__s0_220, splat_lane_s16(__s1_220, __p2_220)); \ + __ret_220; \ +}) +#else +#define vqdmulh_lane_s16(__p0_221, __p1_221, __p2_221) __extension__ ({ \ + int16x4_t __ret_221; \ + int16x4_t __s0_221 = __p0_221; \ + int16x4_t __s1_221 = __p1_221; \ + int16x4_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 3, 2, 1, 0); \ + int16x4_t __rev1_221; __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 3, 2, 1, 0); \ + __ret_221 = __noswap_vqdmulh_s16(__rev0_221, __noswap_splat_lane_s16(__rev1_221, __p2_221)); \ + __ret_221 = 
__builtin_shufflevector(__ret_221, __ret_221, 3, 2, 1, 0); \ + __ret_221; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s32(__p0_222, __p1_222, __p2_222) __extension__ ({ \ + int32x4_t __ret_222; \ + int32x4_t __s0_222 = __p0_222; \ + int32x2_t __s1_222 = __p1_222; \ + __ret_222 = vqrdmulhq_s32(__s0_222, splatq_lane_s32(__s1_222, __p2_222)); \ + __ret_222; \ +}) +#else +#define vqrdmulhq_lane_s32(__p0_223, __p1_223, __p2_223) __extension__ ({ \ + int32x4_t __ret_223; \ + int32x4_t __s0_223 = __p0_223; \ + int32x2_t __s1_223 = __p1_223; \ + int32x4_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 3, 2, 1, 0); \ + int32x2_t __rev1_223; __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 1, 0); \ + __ret_223 = __noswap_vqrdmulhq_s32(__rev0_223, __noswap_splatq_lane_s32(__rev1_223, __p2_223)); \ + __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 3, 2, 1, 0); \ + __ret_223; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s16(__p0_224, __p1_224, __p2_224) __extension__ ({ \ + int16x8_t __ret_224; \ + int16x8_t __s0_224 = __p0_224; \ + int16x4_t __s1_224 = __p1_224; \ + __ret_224 = vqrdmulhq_s16(__s0_224, splatq_lane_s16(__s1_224, __p2_224)); \ + __ret_224; \ +}) +#else +#define vqrdmulhq_lane_s16(__p0_225, __p1_225, __p2_225) __extension__ ({ \ + int16x8_t __ret_225; \ + int16x8_t __s0_225 = __p0_225; \ + int16x4_t __s1_225 = __p1_225; \ + int16x8_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_225; __rev1_225 = __builtin_shufflevector(__s1_225, __s1_225, 3, 2, 1, 0); \ + __ret_225 = __noswap_vqrdmulhq_s16(__rev0_225, __noswap_splatq_lane_s16(__rev1_225, __p2_225)); \ + __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_225; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s32(__p0_226, __p1_226, __p2_226) __extension__ ({ \ + int32x2_t __ret_226; \ + int32x2_t __s0_226 = __p0_226; \ + int32x2_t __s1_226 = __p1_226; \ + __ret_226 = vqrdmulh_s32(__s0_226, splat_lane_s32(__s1_226, __p2_226)); \ + __ret_226; \ +}) +#else +#define vqrdmulh_lane_s32(__p0_227, __p1_227, __p2_227) __extension__ ({ \ + int32x2_t __ret_227; \ + int32x2_t __s0_227 = __p0_227; \ + int32x2_t __s1_227 = __p1_227; \ + int32x2_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 1, 0); \ + int32x2_t __rev1_227; __rev1_227 = __builtin_shufflevector(__s1_227, __s1_227, 1, 0); \ + __ret_227 = __noswap_vqrdmulh_s32(__rev0_227, __noswap_splat_lane_s32(__rev1_227, __p2_227)); \ + __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 1, 0); \ + __ret_227; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s16(__p0_228, __p1_228, __p2_228) __extension__ ({ \ + int16x4_t __ret_228; \ + int16x4_t __s0_228 = __p0_228; \ + int16x4_t __s1_228 = __p1_228; \ + __ret_228 = vqrdmulh_s16(__s0_228, splat_lane_s16(__s1_228, __p2_228)); \ + __ret_228; \ +}) +#else +#define vqrdmulh_lane_s16(__p0_229, __p1_229, __p2_229) __extension__ ({ \ + int16x4_t __ret_229; \ + int16x4_t __s0_229 = __p0_229; \ + int16x4_t __s1_229 = __p1_229; \ + int16x4_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 3, 2, 1, 0); \ + int16x4_t __rev1_229; __rev1_229 = __builtin_shufflevector(__s1_229, __s1_229, 3, 2, 1, 0); \ + __ret_229 = __noswap_vqrdmulh_s16(__rev0_229, __noswap_splat_lane_s16(__rev1_229, __p2_229)); \ + __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 3, 2, 1, 0); \ + __ret_229; \ +}) 
+#endif + +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = 
(poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) 
uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + 
__ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t 
vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = 
(int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t 
__ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t 
vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + 
float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t 
vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +#endif +#if (__ARM_FP & 2) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcvt_f32_f16(float16x4_t __p0) { + 
float32x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ + __ret; \ +}) +#else +#define vld1q_dup_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_dup_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ + __ret; \ +}) +#else +#define vld1_dup_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s1 = __p1; \ + __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ + __ret; \ +}) +#else +#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s1 = __p1; \ + __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ + __ret; \ +}) +#else +#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x2(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x2(__p0) __extension__ ({ \ + 
float16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x2(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x2(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x3(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x3(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x3(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x3(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f16_x4(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld1q_f16_x4(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_f16_x4(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld1_f16_x4(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld2q_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + 
__builtin_neon_vld2q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld2_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld2q_dup_f16(__p0) __extension__ ({ \ + float16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_dup_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld2_dup_f16(__p0) __extension__ ({ \ + float16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __ret; \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \ + __ret; \ +}) +#else +#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __ret; \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __ret; \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \ + __ret; \ +}) +#else +#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __ret; \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vld3q_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld3q_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld3_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld3q_dup_f16(__p0) __extension__ ({ \ + float16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_dup_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld3_dup_f16(__p0) __extension__ ({ \ + float16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __ret; \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \ + __ret; \ +}) +#else +#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __ret; \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3_lane_f16(__p0, __p1, 
__p2) __extension__ ({ \ + float16x4x3_t __ret; \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \ + __ret; \ +}) +#else +#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __ret; \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld4q_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld4_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ + __ret; \ +}) +#else +#define vld4q_dup_f16(__p0) __extension__ ({ \ + float16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_dup_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ + __ret; \ +}) +#else +#define vld4_dup_f16(__p0) __extension__ ({ \ + float16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __ret; \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \ + __ret; \ +}) +#else +#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __ret; \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __ret; \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \ + __ret; \ +}) +#else +#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __ret; \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \ +}) +#else +#define vst1q_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \ +}) +#else +#define vst1_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __s1 = __p1; 
\ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ +}) +#else +#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ +}) +#else +#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \ +}) +#else +#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x2(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \ +}) +#else +#define vst1_f16_x2(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \ +}) +#else +#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \ +}) +#else +#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \ +}) +#else +#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \ +}) +#else +#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f16(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \ +}) +#else +#define vst2q_f16(__p0, __p1) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_f16(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \ +}) +#else +#define vst2_f16(__p0, __p1) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \ +}) +#else +#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x2_t __s1 = __p1; \ + float16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 
0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \ +}) +#else +#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x2_t __s1 = __p1; \ + float16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f16(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \ +}) +#else +#define vst3q_f16(__p0, __p1) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_f16(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \ +}) +#else +#define vst3_f16(__p0, __p1) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \ +}) +#else +#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x3_t __s1 = __p1; \ + float16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \ +}) +#else +#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x3_t __s1 = __p1; \ + float16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f16(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \ +}) +#else +#define vst4q_f16(__p0, __p1) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_f16(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \ +}) +#else +#define vst4_f16(__p0, __p1) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \ +}) +#else +#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8x4_t __s1 = __p1; \ + float16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \ +}) +#else +#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4x4_t __s1 = __p1; \ + float16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \ +}) +#endif + +#endif +#if (defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif +#if (defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrnd_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) 
__builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrnda_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndi_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndm_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndn_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndpq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndpq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndp_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndx_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); + return __ret; +} +#endif +#if __ARM_ARCH >= 8 +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes,neon"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vcvta_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vcvta_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcvta_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcvta_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vcvtm_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vcvtm_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vcvtn_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vcvtn_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vcvtpq_s32_f32(float32x4_t __p0) 
{ + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vcvtp_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vcvtp_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("sha2,neon"))) uint32_t vsha1h_u32(uint32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1su0q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha1su1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256hq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256hq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai 
__attribute__((target("sha2,neon"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256h2q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256su0q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsha256su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsha256su1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnd_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnd_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) 
float16x8_t vrndaq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndaq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnda_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnda_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndmq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndmq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndm_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndm_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndnq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndnq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndn_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndn_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("fullfp16,neon"))) float16x8_t vrndpq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndpq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndp_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndp_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndxq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndxq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndx_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndx_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrnd_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t 
vrndaq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndaq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrnda_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndiq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndi_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndmq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndm_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndnq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t 
vrndnq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndn_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float32_t vrndns_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrndns_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndpq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndp_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrndxq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrndx_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 
40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__ARM_FEATURE_FMA) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, -__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, -__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 
1, 0); + __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#endif +#if defined(__aarch64__) || defined(__arm64ec__) +__ai __attribute__((target("aes,neon"))) poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__rev0, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_bf16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ + bfloat16x8_t __ret_230; \ + bfloat16x8_t __s0_230 = __p0_230; \ + bfloat16x4_t __s2_230 = __p2_230; \ + __ret_230 = vsetq_lane_bf16(vget_lane_bf16(__s2_230, __p3_230), __s0_230, __p1_230); \ + __ret_230; \ +}) +#else +#define vcopyq_lane_bf16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \ + bfloat16x8_t __ret_231; \ + bfloat16x8_t __s0_231 = __p0_231; \ + bfloat16x4_t __s2_231 = __p2_231; \ + bfloat16x8_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_231; __rev2_231 = __builtin_shufflevector(__s2_231, __s2_231, 3, 2, 1, 0); \ + __ret_231 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_231, __p3_231), __rev0_231, __p1_231); \ + __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_231; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_bf16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \ + bfloat16x4_t __ret_232; \ + bfloat16x4_t __s0_232 = __p0_232; \ + bfloat16x4_t __s2_232 = __p2_232; \ + __ret_232 = vset_lane_bf16(vget_lane_bf16(__s2_232, __p3_232), __s0_232, __p1_232); \ + __ret_232; \ +}) +#else +#define vcopy_lane_bf16(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \ + bfloat16x4_t __ret_233; \ + bfloat16x4_t __s0_233 = __p0_233; \ + bfloat16x4_t __s2_233 = __p2_233; \ + bfloat16x4_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_233; __rev2_233 = __builtin_shufflevector(__s2_233, __s2_233, 3, 2, 1, 0); \ + __ret_233 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_233, __p3_233), __rev0_233, __p1_233); \ + __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 3, 2, 1, 0); \ + __ret_233; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_bf16(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \ + bfloat16x8_t __ret_234; \ + bfloat16x8_t __s0_234 = __p0_234; \ + bfloat16x8_t __s2_234 = __p2_234; \ + __ret_234 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_234, __p3_234), __s0_234, __p1_234); \ + __ret_234; \ +}) +#else +#define vcopyq_laneq_bf16(__p0_235, __p1_235, 
__p2_235, __p3_235) __extension__ ({ \ + bfloat16x8_t __ret_235; \ + bfloat16x8_t __s0_235 = __p0_235; \ + bfloat16x8_t __s2_235 = __p2_235; \ + bfloat16x8_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_235; __rev2_235 = __builtin_shufflevector(__s2_235, __s2_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_235 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_235, __p3_235), __rev0_235, __p1_235); \ + __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_235; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_bf16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \ + bfloat16x4_t __ret_236; \ + bfloat16x4_t __s0_236 = __p0_236; \ + bfloat16x8_t __s2_236 = __p2_236; \ + __ret_236 = vset_lane_bf16(vgetq_lane_bf16(__s2_236, __p3_236), __s0_236, __p1_236); \ + __ret_236; \ +}) +#else +#define vcopy_laneq_bf16(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \ + bfloat16x4_t __ret_237; \ + bfloat16x4_t __s0_237 = __p0_237; \ + bfloat16x8_t __s2_237 = __p2_237; \ + bfloat16x4_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_237; __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_237 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_237, __p3_237), __rev0_237, __p1_237); \ + __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 3, 2, 1, 0); \ + __ret_237; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__p0, (int8x16_t)__p1, 43); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = __a64_vcvtq_low_bf16_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("bf16,neon"))) poly8x8_t 
vreinterpret_p8_bf16(bfloat16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { + uint16x4_t __ret; + 
__ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { + bfloat16x8_t __ret; + __ret = 
(bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vdotq_laneq_u32(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \ + uint32x4_t __ret_238; \ + uint32x4_t __s0_238 = __p0_238; \ + uint8x16_t __s1_238 = __p1_238; \ + uint8x16_t __s2_238 = __p2_238; \ +uint8x16_t __reint_238 = __s2_238; \ +uint32x4_t __reint1_238 = splatq_laneq_u32(*(uint32x4_t *) &__reint_238, __p3_238); \ + __ret_238 = vdotq_u32(__s0_238, __s1_238, *(uint8x16_t *) &__reint1_238); \ + __ret_238; \ +}) +#else +#define vdotq_laneq_u32(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \ + uint32x4_t __ret_239; \ + uint32x4_t __s0_239 = __p0_239; \ + uint8x16_t __s1_239 = __p1_239; \ + uint8x16_t __s2_239 = __p2_239; \ + uint32x4_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 3, 2, 1, 0); \ + uint8x16_t __rev1_239; __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_239; __rev2_239 = 
__builtin_shufflevector(__s2_239, __s2_239, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_239 = __rev2_239; \ +uint32x4_t __reint1_239 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_239, __p3_239); \ + __ret_239 = __noswap_vdotq_u32(__rev0_239, __rev1_239, *(uint8x16_t *) &__reint1_239); \ + __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \ + __ret_239; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_laneq_s32(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \ + int32x4_t __ret_240; \ + int32x4_t __s0_240 = __p0_240; \ + int8x16_t __s1_240 = __p1_240; \ + int8x16_t __s2_240 = __p2_240; \ +int8x16_t __reint_240 = __s2_240; \ +int32x4_t __reint1_240 = splatq_laneq_s32(*(int32x4_t *) &__reint_240, __p3_240); \ + __ret_240 = vdotq_s32(__s0_240, __s1_240, *(int8x16_t *) &__reint1_240); \ + __ret_240; \ +}) +#else +#define vdotq_laneq_s32(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \ + int32x4_t __ret_241; \ + int32x4_t __s0_241 = __p0_241; \ + int8x16_t __s1_241 = __p1_241; \ + int8x16_t __s2_241 = __p2_241; \ + int32x4_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 3, 2, 1, 0); \ + int8x16_t __rev1_241; __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_241; __rev2_241 = __builtin_shufflevector(__s2_241, __s2_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_241 = __rev2_241; \ +int32x4_t __reint1_241 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_241, __p3_241); \ + __ret_241 = __noswap_vdotq_s32(__rev0_241, __rev1_241, *(int8x16_t *) &__reint1_241); \ + __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 3, 2, 1, 0); \ + __ret_241; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_laneq_u32(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \ + uint32x2_t __ret_242; \ + uint32x2_t __s0_242 = __p0_242; \ + uint8x8_t __s1_242 = __p1_242; \ + uint8x16_t __s2_242 = __p2_242; \ +uint8x16_t __reint_242 = __s2_242; \ +uint32x2_t __reint1_242 = splat_laneq_u32(*(uint32x4_t *) &__reint_242, __p3_242); \ + __ret_242 = vdot_u32(__s0_242, __s1_242, *(uint8x8_t *) &__reint1_242); \ + __ret_242; \ +}) +#else +#define vdot_laneq_u32(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \ + uint32x2_t __ret_243; \ + uint32x2_t __s0_243 = __p0_243; \ + uint8x8_t __s1_243 = __p1_243; \ + uint8x16_t __s2_243 = __p2_243; \ + uint32x2_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 1, 0); \ + uint8x8_t __rev1_243; __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_243; __rev2_243 = __builtin_shufflevector(__s2_243, __s2_243, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_243 = __rev2_243; \ +uint32x2_t __reint1_243 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_243, __p3_243); \ + __ret_243 = __noswap_vdot_u32(__rev0_243, __rev1_243, *(uint8x8_t *) &__reint1_243); \ + __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 1, 0); \ + __ret_243; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_laneq_s32(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \ + int32x2_t __ret_244; \ + int32x2_t __s0_244 = __p0_244; \ + int8x8_t __s1_244 = __p1_244; \ + int8x16_t __s2_244 = __p2_244; \ +int8x16_t __reint_244 = __s2_244; \ +int32x2_t __reint1_244 = splat_laneq_s32(*(int32x4_t *) &__reint_244, __p3_244); \ + __ret_244 = vdot_s32(__s0_244, 
__s1_244, *(int8x8_t *) &__reint1_244); \ + __ret_244; \ +}) +#else +#define vdot_laneq_s32(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \ + int32x2_t __ret_245; \ + int32x2_t __s0_245 = __p0_245; \ + int8x8_t __s1_245 = __p1_245; \ + int8x16_t __s2_245 = __p2_245; \ + int32x2_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 1, 0); \ + int8x8_t __rev1_245; __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_245; __rev2_245 = __builtin_shufflevector(__s2_245, __s2_245, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_245 = __rev2_245; \ +int32x2_t __reint1_245 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_245, __p3_245); \ + __ret_245 = __noswap_vdot_s32(__rev0_245, __rev1_245, *(int8x8_t *) &__reint1_245); \ + __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 1, 0); \ + __ret_245; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) 
__builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, 
float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t 
vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ 
\ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#else +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_lane_f16(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \ + float16_t __ret_246; \ + float16_t __s0_246 = __p0_246; \ + float16_t __s1_246 = __p1_246; \ + float16x4_t __s2_246 = __p2_246; \ + __ret_246 = vfmah_lane_f16(__s0_246, -__s1_246, __s2_246, __p3_246); \ + __ret_246; \ +}) +#else +#define vfmsh_lane_f16(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \ + float16_t __ret_247; \ + float16_t __s0_247 = __p0_247; \ + float16_t __s1_247 = __p1_247; \ + float16x4_t __s2_247 = __p2_247; \ + float16x4_t __rev2_247; __rev2_247 = 
__builtin_shufflevector(__s2_247, __s2_247, 3, 2, 1, 0); \ + __ret_247 = __noswap_vfmah_lane_f16(__s0_247, -__s1_247, __rev2_247, __p3_247); \ + __ret_247; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f16(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \ + float16x8_t __ret_248; \ + float16x8_t __s0_248 = __p0_248; \ + float16x8_t __s1_248 = __p1_248; \ + float16x4_t __s2_248 = __p2_248; \ + __ret_248 = vfmaq_lane_f16(__s0_248, -__s1_248, __s2_248, __p3_248); \ + __ret_248; \ +}) +#else +#define vfmsq_lane_f16(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \ + float16x8_t __ret_249; \ + float16x8_t __s0_249 = __p0_249; \ + float16x8_t __s1_249 = __p1_249; \ + float16x4_t __s2_249 = __p2_249; \ + float16x8_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_249; __rev2_249 = __builtin_shufflevector(__s2_249, __s2_249, 3, 2, 1, 0); \ + __ret_249 = __noswap_vfmaq_lane_f16(__rev0_249, -__rev1_249, __rev2_249, __p3_249); \ + __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_249; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f16(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \ + float16x4_t __ret_250; \ + float16x4_t __s0_250 = __p0_250; \ + float16x4_t __s1_250 = __p1_250; \ + float16x4_t __s2_250 = __p2_250; \ + __ret_250 = vfma_lane_f16(__s0_250, -__s1_250, __s2_250, __p3_250); \ + __ret_250; \ +}) +#else +#define vfms_lane_f16(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \ + float16x4_t __ret_251; \ + float16x4_t __s0_251 = __p0_251; \ + float16x4_t __s1_251 = __p1_251; \ + float16x4_t __s2_251 = __p2_251; \ + float16x4_t __rev0_251; __rev0_251 = __builtin_shufflevector(__s0_251, __s0_251, 3, 2, 1, 0); \ + float16x4_t __rev1_251; __rev1_251 = __builtin_shufflevector(__s1_251, __s1_251, 3, 2, 1, 0); \ + float16x4_t __rev2_251; __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 3, 2, 1, 0); \ + __ret_251 = __noswap_vfma_lane_f16(__rev0_251, -__rev1_251, __rev2_251, __p3_251); \ + __ret_251 = __builtin_shufflevector(__ret_251, __ret_251, 3, 2, 1, 0); \ + __ret_251; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsh_laneq_f16(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \ + float16_t __ret_252; \ + float16_t __s0_252 = __p0_252; \ + float16_t __s1_252 = __p1_252; \ + float16x8_t __s2_252 = __p2_252; \ + __ret_252 = vfmah_laneq_f16(__s0_252, -__s1_252, __s2_252, __p3_252); \ + __ret_252; \ +}) +#else +#define vfmsh_laneq_f16(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \ + float16_t __ret_253; \ + float16_t __s0_253 = __p0_253; \ + float16_t __s1_253 = __p1_253; \ + float16x8_t __s2_253 = __p2_253; \ + float16x8_t __rev2_253; __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_253 = __noswap_vfmah_laneq_f16(__s0_253, -__s1_253, __rev2_253, __p3_253); \ + __ret_253; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f16(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \ + float16x8_t __ret_254; \ + float16x8_t __s0_254 = __p0_254; \ + float16x8_t __s1_254 = __p1_254; \ + float16x8_t __s2_254 = __p2_254; \ + __ret_254 = vfmaq_laneq_f16(__s0_254, -__s1_254, __s2_254, __p3_254); \ + __ret_254; \ +}) +#else +#define vfmsq_laneq_f16(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \ + float16x8_t __ret_255; \ + 
float16x8_t __s0_255 = __p0_255; \ + float16x8_t __s1_255 = __p1_255; \ + float16x8_t __s2_255 = __p2_255; \ + float16x8_t __rev0_255; __rev0_255 = __builtin_shufflevector(__s0_255, __s0_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_255; __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_255; __rev2_255 = __builtin_shufflevector(__s2_255, __s2_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_255 = __noswap_vfmaq_laneq_f16(__rev0_255, -__rev1_255, __rev2_255, __p3_255); \ + __ret_255 = __builtin_shufflevector(__ret_255, __ret_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_255; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f16(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \ + float16x4_t __ret_256; \ + float16x4_t __s0_256 = __p0_256; \ + float16x4_t __s1_256 = __p1_256; \ + float16x8_t __s2_256 = __p2_256; \ + __ret_256 = vfma_laneq_f16(__s0_256, -__s1_256, __s2_256, __p3_256); \ + __ret_256; \ +}) +#else +#define vfms_laneq_f16(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ + float16x4_t __ret_257; \ + float16x4_t __s0_257 = __p0_257; \ + float16x4_t __s1_257 = __p1_257; \ + float16x8_t __s2_257 = __p2_257; \ + float16x4_t __rev0_257; __rev0_257 = __builtin_shufflevector(__s0_257, __s0_257, 3, 2, 1, 0); \ + float16x4_t __rev1_257; __rev1_257 = __builtin_shufflevector(__s1_257, __s1_257, 3, 2, 1, 0); \ + float16x8_t __rev2_257; __rev2_257 = __builtin_shufflevector(__s2_257, __s2_257, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_257 = __noswap_vfma_laneq_f16(__rev0_257, -__rev1_257, __rev2_257, __p3_257); \ + __ret_257 = __builtin_shufflevector(__ret_257, __ret_257, 3, 2, 1, 0); \ + __ret_257; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) +#else +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) 
__builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ + __ret; \ +}) +#else +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vminv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ + __ret; \ +}) +#else +#define vminv_f16(__p0) __extension__ ({ \ + 
float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f16(__p0_258, __p1_258, __p2_258) __extension__ ({ \ + float16x8_t __ret_258; \ + float16x8_t __s0_258 = __p0_258; \ + float16x8_t __s1_258 = __p1_258; \ + __ret_258 = __s0_258 * splatq_laneq_f16(__s1_258, __p2_258); \ + __ret_258; \ +}) +#else +#define vmulq_laneq_f16(__p0_259, __p1_259, __p2_259) __extension__ ({ \ + float16x8_t __ret_259; \ + float16x8_t __s0_259 = __p0_259; \ + float16x8_t __s1_259 = __p1_259; \ + float16x8_t __rev0_259; __rev0_259 = __builtin_shufflevector(__s0_259, __s0_259, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_259; __rev1_259 = __builtin_shufflevector(__s1_259, __s1_259, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_259 = __rev0_259 * __noswap_splatq_laneq_f16(__rev1_259, __p2_259); \ + __ret_259 = __builtin_shufflevector(__ret_259, __ret_259, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_259; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f16(__p0_260, __p1_260, __p2_260) __extension__ ({ \ + float16x4_t __ret_260; \ + float16x4_t __s0_260 = __p0_260; \ + float16x8_t __s1_260 = __p1_260; \ + __ret_260 = __s0_260 * splat_laneq_f16(__s1_260, __p2_260); \ + __ret_260; \ +}) +#else +#define vmul_laneq_f16(__p0_261, __p1_261, __p2_261) __extension__ ({ \ + float16x4_t __ret_261; \ + float16x4_t __s0_261 = __p0_261; \ + float16x8_t __s1_261 = __p1_261; \ + float16x4_t __rev0_261; __rev0_261 = __builtin_shufflevector(__s0_261, __s0_261, 3, 2, 1, 0); \ + float16x8_t __rev1_261; __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_261 = __rev0_261 * __noswap_splat_laneq_f16(__rev1_261, __p2_261); \ + __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 3, 2, 1, 0); \ + __ret_261; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16,neon"))) float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = 
(float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16,neon"))) float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f16(__p0_262, __p1_262, __p2_262) __extension__ ({ \ + float16x8_t __ret_262; \ + float16x8_t __s0_262 = __p0_262; \ + float16x4_t __s1_262 = __p1_262; \ + __ret_262 = vmulxq_f16(__s0_262, splatq_lane_f16(__s1_262, __p2_262)); \ + __ret_262; \ +}) +#else +#define vmulxq_lane_f16(__p0_263, __p1_263, __p2_263) __extension__ ({ \ + float16x8_t __ret_263; \ + float16x8_t __s0_263 = __p0_263; \ + float16x4_t __s1_263 = __p1_263; \ + float16x8_t __rev0_263; __rev0_263 = __builtin_shufflevector(__s0_263, __s0_263, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_263; __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, 3, 2, 1, 0); \ + __ret_263 = __noswap_vmulxq_f16(__rev0_263, __noswap_splatq_lane_f16(__rev1_263, __p2_263)); \ + __ret_263 = __builtin_shufflevector(__ret_263, __ret_263, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_263; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f16(__p0_264, __p1_264, __p2_264) __extension__ ({ \ + float16x4_t __ret_264; \ + float16x4_t __s0_264 = __p0_264; \ + float16x4_t __s1_264 = __p1_264; \ + __ret_264 = vmulx_f16(__s0_264, splat_lane_f16(__s1_264, __p2_264)); \ + __ret_264; \ +}) +#else +#define vmulx_lane_f16(__p0_265, __p1_265, __p2_265) __extension__ ({ \ + float16x4_t __ret_265; \ + float16x4_t __s0_265 = __p0_265; \ + float16x4_t __s1_265 = __p1_265; \ + float16x4_t __rev0_265; __rev0_265 = __builtin_shufflevector(__s0_265, __s0_265, 3, 2, 1, 0); \ + float16x4_t __rev1_265; __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, 3, 2, 1, 0); \ + __ret_265 = __noswap_vmulx_f16(__rev0_265, __noswap_splat_lane_f16(__rev1_265, __p2_265)); \ + __ret_265 = __builtin_shufflevector(__ret_265, __ret_265, 3, 2, 1, 0); \ + __ret_265; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f16(__p0_266, __p1_266, __p2_266) __extension__ ({ \ + float16x8_t __ret_266; \ + float16x8_t 
__s0_266 = __p0_266; \ + float16x8_t __s1_266 = __p1_266; \ + __ret_266 = vmulxq_f16(__s0_266, splatq_laneq_f16(__s1_266, __p2_266)); \ + __ret_266; \ +}) +#else +#define vmulxq_laneq_f16(__p0_267, __p1_267, __p2_267) __extension__ ({ \ + float16x8_t __ret_267; \ + float16x8_t __s0_267 = __p0_267; \ + float16x8_t __s1_267 = __p1_267; \ + float16x8_t __rev0_267; __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_267; __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_267 = __noswap_vmulxq_f16(__rev0_267, __noswap_splatq_laneq_f16(__rev1_267, __p2_267)); \ + __ret_267 = __builtin_shufflevector(__ret_267, __ret_267, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_267; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f16(__p0_268, __p1_268, __p2_268) __extension__ ({ \ + float16x4_t __ret_268; \ + float16x4_t __s0_268 = __p0_268; \ + float16x8_t __s1_268 = __p1_268; \ + __ret_268 = vmulx_f16(__s0_268, splat_laneq_f16(__s1_268, __p2_268)); \ + __ret_268; \ +}) +#else +#define vmulx_laneq_f16(__p0_269, __p1_269, __p2_269) __extension__ ({ \ + float16x4_t __ret_269; \ + float16x4_t __s0_269 = __p0_269; \ + float16x8_t __s1_269 = __p1_269; \ + float16x4_t __rev0_269; __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, 3, 2, 1, 0); \ + float16x8_t __rev1_269; __rev1_269 = __builtin_shufflevector(__s1_269, __s1_269, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_269 = __noswap_vmulx_f16(__rev0_269, __noswap_splat_laneq_f16(__rev1_269, __p2_269)); \ + __ret_269 = __builtin_shufflevector(__ret_269, __ret_269, 3, 2, 1, 0); \ + __ret_269; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 
5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + return 
__ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) 
__builtin_neon_vsqrt_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudotq_laneq_s32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ + int32x4_t __ret_270; \ + int32x4_t __s0_270 = __p0_270; \ + int8x16_t __s1_270 = __p1_270; \ + uint8x16_t __s2_270 = __p2_270; \ +uint8x16_t __reint_270 = __s2_270; \ + __ret_270 = vusdotq_s32(__s0_270, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_270, __p3_270)), __s1_270); \ + __ret_270; \ +}) +#else +#define vsudotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ + int32x4_t __ret_271; \ + int32x4_t __s0_271 = __p0_271; \ + int8x16_t __s1_271 = __p1_271; \ + uint8x16_t __s2_271 = __p2_271; \ + int32x4_t __rev0_271; __rev0_271 = __builtin_shufflevector(__s0_271, __s0_271, 3, 2, 1, 0); \ + int8x16_t __rev1_271; __rev1_271 = __builtin_shufflevector(__s1_271, __s1_271, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_271; __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_271 = __rev2_271; \ + __ret_271 = __noswap_vusdotq_s32(__rev0_271, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271)), __rev1_271); \ + __ret_271 = __builtin_shufflevector(__ret_271, __ret_271, 3, 2, 1, 0); \ + __ret_271; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudot_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ + int32x2_t __ret_272; \ + int32x2_t __s0_272 = __p0_272; \ + int8x8_t __s1_272 = __p1_272; \ + uint8x16_t __s2_272 = __p2_272; \ +uint8x16_t __reint_272 = __s2_272; \ + __ret_272 = vusdot_s32(__s0_272, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272)), __s1_272); \ + __ret_272; \ +}) +#else +#define vsudot_laneq_s32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ + int32x2_t __ret_273; \ + int32x2_t __s0_273 = __p0_273; \ + int8x8_t __s1_273 = __p1_273; \ + uint8x16_t __s2_273 = __p2_273; \ + int32x2_t __rev0_273; __rev0_273 = __builtin_shufflevector(__s0_273, __s0_273, 1, 0); \ + int8x8_t __rev1_273; __rev1_273 = __builtin_shufflevector(__s1_273, __s1_273, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_273; __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_273 = __rev2_273; \ + __ret_273 = __noswap_vusdot_s32(__rev0_273, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_273, __p3_273)), __rev1_273); \ + __ret_273 = __builtin_shufflevector(__ret_273, __ret_273, 1, 0); \ + __ret_273; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_laneq_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ + int32x4_t __ret_274; \ + int32x4_t __s0_274 = __p0_274; \ + uint8x16_t __s1_274 = __p1_274; \ + int8x16_t __s2_274 = __p2_274; \ +int8x16_t __reint_274 = __s2_274; \ + __ret_274 = vusdotq_s32(__s0_274, __s1_274, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_274, __p3_274))); \ + __ret_274; \ +}) +#else +#define vusdotq_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ + int32x4_t __ret_275; \ + int32x4_t __s0_275 = __p0_275; \ + uint8x16_t __s1_275 = __p1_275; \ + int8x16_t __s2_275 = __p2_275; \ + int32x4_t __rev0_275; __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, 3, 2, 1, 0); \ + uint8x16_t __rev1_275; __rev1_275 = __builtin_shufflevector(__s1_275, __s1_275, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); \ + int8x16_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_275 = __rev2_275; \ + __ret_275 = __noswap_vusdotq_s32(__rev0_275, __rev1_275, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275))); \ + __ret_275 = __builtin_shufflevector(__ret_275, __ret_275, 3, 2, 1, 0); \ + __ret_275; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ + int32x2_t __ret_276; \ + int32x2_t __s0_276 = __p0_276; \ + uint8x8_t __s1_276 = __p1_276; \ + int8x16_t __s2_276 = __p2_276; \ +int8x16_t __reint_276 = __s2_276; \ + __ret_276 = vusdot_s32(__s0_276, __s1_276, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276))); \ + __ret_276; \ +}) +#else +#define vusdot_laneq_s32(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \ + int32x2_t __ret_277; \ + int32x2_t __s0_277 = __p0_277; \ + uint8x8_t __s1_277 = __p1_277; \ + int8x16_t __s2_277 = __p2_277; \ + int32x2_t __rev0_277; __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, 1, 0); \ + uint8x8_t __rev1_277; __rev1_277 = __builtin_shufflevector(__s1_277, __s1_277, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_277 = __rev2_277; \ + __ret_277 = __noswap_vusdot_s32(__rev0_277, __rev1_277, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_277, __p3_277))); \ + __ret_277 = __builtin_shufflevector(__ret_277, __ret_277, 1, 0); \ + __ret_277; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai __attribute__((target("neon"))) float64_t vabdd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vabds_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vabs_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64_t vaddlvq_u32(uint32x4_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64_t vaddlvq_u32(uint32x4_t __p0) { + uint64_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint32_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vaddlvq_s8(int8x16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16_t vaddlvq_s8(int8x16_t __p0) { + int16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64_t vaddlvq_s32(int32x4_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64_t vaddlvq_s32(int32x4_t __p0) { + int64_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32_t vaddlvq_s16(int16x8_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32_t vaddlvq_s16(int16x8_t __p0) { + int32_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16_t vaddlv_u8(uint8x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16_t vaddlv_u8(uint8x8_t __p0) { + uint16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64_t vaddlv_u32(uint32x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0); + 
return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64_t vaddlv_u32(uint32x2_t __p0) { + uint64_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32_t vaddlv_u16(uint16x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32_t vaddlv_u16(uint16x4_t __p0) { + uint32_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vaddlv_s8(int8x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16_t vaddlv_s8(int8x8_t __p0) { + int16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64_t vaddlv_s32(int32x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64_t vaddlv_s32(int32x2_t __p0) { + int64_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32_t vaddlv_s16(int16x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32_t vaddlv_s16(int16x4_t __p0) { + int32_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64_t vaddvq_u64(uint64x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64_t vaddvq_u64(uint64x2_t __p0) { + uint64_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8_t vaddvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8_t vaddvq_s8(int8x16_t __p0) { + int8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vaddvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vaddvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vaddvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vaddvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32_t vaddvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32_t vaddvq_s32(int32x4_t __p0) { + int32_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64_t vaddvq_s64(int64x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64_t vaddvq_s64(int64x2_t __p0) { + int64_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vaddvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16_t vaddvq_s16(int16x8_t __p0) { + int16_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8_t vaddv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8_t vaddv_u8(uint8x8_t __p0) { + uint8_t __ret; + uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32_t vaddv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32_t vaddv_u32(uint32x2_t __p0) { + uint32_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16_t vaddv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16_t vaddv_u16(uint16x4_t __p0) { + uint16_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8_t vaddv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddv_s8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8_t vaddv_s8(int8x8_t __p0) { + int8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vaddv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddv_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vaddv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32_t vaddv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddv_s32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32_t vaddv_s32(int32x2_t __p0) { + int32_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vaddv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddv_s16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16_t vaddv_s16(int16x4_t __p0) { + int16_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { + poly64x1_t __ret; + __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { + poly64x2_t __ret; + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { + poly64x2_t __ret; + uint64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcages_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t 
__p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcales_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vceqq_p64(poly64x2_t __p0, 
poly64x2_t __p1) { + uint64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vceqd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vceqz_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else 
+__ai __attribute__((target("neon"))) uint8x8_t vceqz_p8(poly8x8_t __p0) { + uint8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vceqz_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_p8(poly8x16_t __p0) { + uint8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_p64(poly64x2_t __p0) { + uint64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t 
vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + 
return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vceqz_u64(uint64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vceqz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vceqz_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vceqz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vceqz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vceqz_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) uint32x2_t vceqz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vceqz_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vceqz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vceqz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vceqz_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64_t vceqzd_u64(uint64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vceqzd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vceqzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vceqzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); 
+ __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcged_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcges_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcgezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcgezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) 
__builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcgezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcgezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcgez_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcgez_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcgez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcgez_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcgez_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcgez_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcgez_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcgez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vcgez_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t 
vcgez_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64_t vcgezd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcgezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcgezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = 
(uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcgtzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcgtzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcgtzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcgtzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) 
__builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcgtz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcgtz_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcgtz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcgtz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vcgtz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vcgtz_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64_t vcgtzd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcgtzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcgtzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcled_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcles_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vclezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vclezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vclezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) uint64x2_t vclezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vclezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vclezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vclezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vclezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vclezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vclezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vclezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vclezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vclez_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vclez_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vclez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vclez_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) uint32x2_t vclez_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vclez_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vclez_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vclez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vclez_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vclez_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64_t vclezd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vclezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vclezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcltq_s64(int64x2_t __p0, 
int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcltd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vclts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcltzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcltzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) uint32x4_t vcltzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vcltzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vcltzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcltz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vcltz_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcltz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcltz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcltz_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcltz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcltz_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcltz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t 
vcltz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vcltz_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64_t vcltzd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcltzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcltzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p8(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \ + poly8x16_t __ret_278; \ + poly8x16_t __s0_278 = __p0_278; \ + poly8x8_t __s2_278 = __p2_278; \ + __ret_278 = vsetq_lane_p8(vget_lane_p8(__s2_278, __p3_278), __s0_278, __p1_278); \ + __ret_278; \ +}) +#else +#define vcopyq_lane_p8(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \ + poly8x16_t __ret_279; \ + poly8x16_t __s0_279 = __p0_279; \ + poly8x8_t __s2_279 = __p2_279; \ + poly8x16_t __rev0_279; __rev0_279 = __builtin_shufflevector(__s0_279, __s0_279, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_279; __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_279 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_279, __p3_279), __rev0_279, __p1_279); \ + __ret_279 = __builtin_shufflevector(__ret_279, __ret_279, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_279; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \ + poly16x8_t __ret_280; \ + poly16x8_t __s0_280 = __p0_280; \ + poly16x4_t __s2_280 = __p2_280; \ + __ret_280 = vsetq_lane_p16(vget_lane_p16(__s2_280, __p3_280), __s0_280, __p1_280); \ + __ret_280; \ +}) +#else +#define vcopyq_lane_p16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ + poly16x8_t __ret_281; \ + poly16x8_t __s0_281 = __p0_281; \ + poly16x4_t __s2_281 = __p2_281; \ + poly16x8_t __rev0_281; __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
poly16x4_t __rev2_281; __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \ + __ret_281 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_281, __p3_281), __rev0_281, __p1_281); \ + __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_281; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u8(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ + uint8x16_t __ret_282; \ + uint8x16_t __s0_282 = __p0_282; \ + uint8x8_t __s2_282 = __p2_282; \ + __ret_282 = vsetq_lane_u8(vget_lane_u8(__s2_282, __p3_282), __s0_282, __p1_282); \ + __ret_282; \ +}) +#else +#define vcopyq_lane_u8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ + uint8x16_t __ret_283; \ + uint8x16_t __s0_283 = __p0_283; \ + uint8x8_t __s2_283 = __p2_283; \ + uint8x16_t __rev0_283; __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_283; __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_283 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_283, __p3_283), __rev0_283, __p1_283); \ + __ret_283 = __builtin_shufflevector(__ret_283, __ret_283, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_283; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u32(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ + uint32x4_t __ret_284; \ + uint32x4_t __s0_284 = __p0_284; \ + uint32x2_t __s2_284 = __p2_284; \ + __ret_284 = vsetq_lane_u32(vget_lane_u32(__s2_284, __p3_284), __s0_284, __p1_284); \ + __ret_284; \ +}) +#else +#define vcopyq_lane_u32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ + uint32x4_t __ret_285; \ + uint32x4_t __s0_285 = __p0_285; \ + uint32x2_t __s2_285 = __p2_285; \ + uint32x4_t __rev0_285; __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 3, 2, 1, 0); \ + uint32x2_t __rev2_285; __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 1, 0); \ + __ret_285 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_285, __p3_285), __rev0_285, __p1_285); \ + __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 3, 2, 1, 0); \ + __ret_285; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u64(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ + uint64x2_t __ret_286; \ + uint64x2_t __s0_286 = __p0_286; \ + uint64x1_t __s2_286 = __p2_286; \ + __ret_286 = vsetq_lane_u64(vget_lane_u64(__s2_286, __p3_286), __s0_286, __p1_286); \ + __ret_286; \ +}) +#else +#define vcopyq_lane_u64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ + uint64x2_t __ret_287; \ + uint64x2_t __s0_287 = __p0_287; \ + uint64x1_t __s2_287 = __p2_287; \ + uint64x2_t __rev0_287; __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \ + __ret_287 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_287, __p3_287), __rev0_287, __p1_287); \ + __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \ + __ret_287; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_u16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ + uint16x8_t __ret_288; \ + uint16x8_t __s0_288 = __p0_288; \ + uint16x4_t __s2_288 = __p2_288; \ + __ret_288 = vsetq_lane_u16(vget_lane_u16(__s2_288, __p3_288), __s0_288, __p1_288); \ + __ret_288; \ +}) +#else +#define vcopyq_lane_u16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ + uint16x8_t __ret_289; \ + uint16x8_t __s0_289 = __p0_289; \ + uint16x4_t __s2_289 = __p2_289; \ + uint16x8_t __rev0_289; __rev0_289 = 
__builtin_shufflevector(__s0_289, __s0_289, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_289; __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 3, 2, 1, 0); \ + __ret_289 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_289, __p3_289), __rev0_289, __p1_289); \ + __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_289; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s8(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ + int8x16_t __ret_290; \ + int8x16_t __s0_290 = __p0_290; \ + int8x8_t __s2_290 = __p2_290; \ + __ret_290 = vsetq_lane_s8(vget_lane_s8(__s2_290, __p3_290), __s0_290, __p1_290); \ + __ret_290; \ +}) +#else +#define vcopyq_lane_s8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ + int8x16_t __ret_291; \ + int8x16_t __s0_291 = __p0_291; \ + int8x8_t __s2_291 = __p2_291; \ + int8x16_t __rev0_291; __rev0_291 = __builtin_shufflevector(__s0_291, __s0_291, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_291; __rev2_291 = __builtin_shufflevector(__s2_291, __s2_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_291 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_291, __p3_291), __rev0_291, __p1_291); \ + __ret_291 = __builtin_shufflevector(__ret_291, __ret_291, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_291; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_f32(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ + float32x4_t __ret_292; \ + float32x4_t __s0_292 = __p0_292; \ + float32x2_t __s2_292 = __p2_292; \ + __ret_292 = vsetq_lane_f32(vget_lane_f32(__s2_292, __p3_292), __s0_292, __p1_292); \ + __ret_292; \ +}) +#else +#define vcopyq_lane_f32(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \ + float32x4_t __ret_293; \ + float32x4_t __s0_293 = __p0_293; \ + float32x2_t __s2_293 = __p2_293; \ + float32x4_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, 3, 2, 1, 0); \ + float32x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \ + __ret_293 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_293, __p3_293), __rev0_293, __p1_293); \ + __ret_293 = __builtin_shufflevector(__ret_293, __ret_293, 3, 2, 1, 0); \ + __ret_293; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \ + int32x4_t __ret_294; \ + int32x4_t __s0_294 = __p0_294; \ + int32x2_t __s2_294 = __p2_294; \ + __ret_294 = vsetq_lane_s32(vget_lane_s32(__s2_294, __p3_294), __s0_294, __p1_294); \ + __ret_294; \ +}) +#else +#define vcopyq_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \ + int32x4_t __ret_295; \ + int32x4_t __s0_295 = __p0_295; \ + int32x2_t __s2_295 = __p2_295; \ + int32x4_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, 3, 2, 1, 0); \ + int32x2_t __rev2_295; __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \ + __ret_295 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_295, __p3_295), __rev0_295, __p1_295); \ + __ret_295 = __builtin_shufflevector(__ret_295, __ret_295, 3, 2, 1, 0); \ + __ret_295; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s64(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \ + int64x2_t __ret_296; \ + int64x2_t __s0_296 = __p0_296; \ + int64x1_t __s2_296 = __p2_296; \ + __ret_296 = vsetq_lane_s64(vget_lane_s64(__s2_296, __p3_296), __s0_296, __p1_296); \ + __ret_296; \ +}) +#else +#define vcopyq_lane_s64(__p0_297, __p1_297, 
__p2_297, __p3_297) __extension__ ({ \ + int64x2_t __ret_297; \ + int64x2_t __s0_297 = __p0_297; \ + int64x1_t __s2_297 = __p2_297; \ + int64x2_t __rev0_297; __rev0_297 = __builtin_shufflevector(__s0_297, __s0_297, 1, 0); \ + __ret_297 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_297, __p3_297), __rev0_297, __p1_297); \ + __ret_297 = __builtin_shufflevector(__ret_297, __ret_297, 1, 0); \ + __ret_297; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_s16(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \ + int16x8_t __ret_298; \ + int16x8_t __s0_298 = __p0_298; \ + int16x4_t __s2_298 = __p2_298; \ + __ret_298 = vsetq_lane_s16(vget_lane_s16(__s2_298, __p3_298), __s0_298, __p1_298); \ + __ret_298; \ +}) +#else +#define vcopyq_lane_s16(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \ + int16x8_t __ret_299; \ + int16x8_t __s0_299 = __p0_299; \ + int16x4_t __s2_299 = __p2_299; \ + int16x8_t __rev0_299; __rev0_299 = __builtin_shufflevector(__s0_299, __s0_299, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_299; __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, 3, 2, 1, 0); \ + __ret_299 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_299, __p3_299), __rev0_299, __p1_299); \ + __ret_299 = __builtin_shufflevector(__ret_299, __ret_299, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_299; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_p8(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \ + poly8x8_t __ret_300; \ + poly8x8_t __s0_300 = __p0_300; \ + poly8x8_t __s2_300 = __p2_300; \ + __ret_300 = vset_lane_p8(vget_lane_p8(__s2_300, __p3_300), __s0_300, __p1_300); \ + __ret_300; \ +}) +#else +#define vcopy_lane_p8(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \ + poly8x8_t __ret_301; \ + poly8x8_t __s0_301 = __p0_301; \ + poly8x8_t __s2_301 = __p2_301; \ + poly8x8_t __rev0_301; __rev0_301 = __builtin_shufflevector(__s0_301, __s0_301, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_301; __rev2_301 = __builtin_shufflevector(__s2_301, __s2_301, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_301 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_301, __p3_301), __rev0_301, __p1_301); \ + __ret_301 = __builtin_shufflevector(__ret_301, __ret_301, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_301; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_p16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \ + poly16x4_t __ret_302; \ + poly16x4_t __s0_302 = __p0_302; \ + poly16x4_t __s2_302 = __p2_302; \ + __ret_302 = vset_lane_p16(vget_lane_p16(__s2_302, __p3_302), __s0_302, __p1_302); \ + __ret_302; \ +}) +#else +#define vcopy_lane_p16(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \ + poly16x4_t __ret_303; \ + poly16x4_t __s0_303 = __p0_303; \ + poly16x4_t __s2_303 = __p2_303; \ + poly16x4_t __rev0_303; __rev0_303 = __builtin_shufflevector(__s0_303, __s0_303, 3, 2, 1, 0); \ + poly16x4_t __rev2_303; __rev2_303 = __builtin_shufflevector(__s2_303, __s2_303, 3, 2, 1, 0); \ + __ret_303 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_303, __p3_303), __rev0_303, __p1_303); \ + __ret_303 = __builtin_shufflevector(__ret_303, __ret_303, 3, 2, 1, 0); \ + __ret_303; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \ + uint8x8_t __ret_304; \ + uint8x8_t __s0_304 = __p0_304; \ + uint8x8_t __s2_304 = __p2_304; \ + __ret_304 = vset_lane_u8(vget_lane_u8(__s2_304, __p3_304), __s0_304, __p1_304); \ + __ret_304; \ +}) +#else +#define vcopy_lane_u8(__p0_305, __p1_305, __p2_305, 
__p3_305) __extension__ ({ \ + uint8x8_t __ret_305; \ + uint8x8_t __s0_305 = __p0_305; \ + uint8x8_t __s2_305 = __p2_305; \ + uint8x8_t __rev0_305; __rev0_305 = __builtin_shufflevector(__s0_305, __s0_305, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_305; __rev2_305 = __builtin_shufflevector(__s2_305, __s2_305, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_305 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_305, __p3_305), __rev0_305, __p1_305); \ + __ret_305 = __builtin_shufflevector(__ret_305, __ret_305, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_305; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ + uint32x2_t __ret_306; \ + uint32x2_t __s0_306 = __p0_306; \ + uint32x2_t __s2_306 = __p2_306; \ + __ret_306 = vset_lane_u32(vget_lane_u32(__s2_306, __p3_306), __s0_306, __p1_306); \ + __ret_306; \ +}) +#else +#define vcopy_lane_u32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ + uint32x2_t __ret_307; \ + uint32x2_t __s0_307 = __p0_307; \ + uint32x2_t __s2_307 = __p2_307; \ + uint32x2_t __rev0_307; __rev0_307 = __builtin_shufflevector(__s0_307, __s0_307, 1, 0); \ + uint32x2_t __rev2_307; __rev2_307 = __builtin_shufflevector(__s2_307, __s2_307, 1, 0); \ + __ret_307 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_307, __p3_307), __rev0_307, __p1_307); \ + __ret_307 = __builtin_shufflevector(__ret_307, __ret_307, 1, 0); \ + __ret_307; \ +}) +#endif + +#define vcopy_lane_u64(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ + uint64x1_t __ret_308; \ + uint64x1_t __s0_308 = __p0_308; \ + uint64x1_t __s2_308 = __p2_308; \ + __ret_308 = vset_lane_u64(vget_lane_u64(__s2_308, __p3_308), __s0_308, __p1_308); \ + __ret_308; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_u16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ + uint16x4_t __ret_309; \ + uint16x4_t __s0_309 = __p0_309; \ + uint16x4_t __s2_309 = __p2_309; \ + __ret_309 = vset_lane_u16(vget_lane_u16(__s2_309, __p3_309), __s0_309, __p1_309); \ + __ret_309; \ +}) +#else +#define vcopy_lane_u16(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ + uint16x4_t __ret_310; \ + uint16x4_t __s0_310 = __p0_310; \ + uint16x4_t __s2_310 = __p2_310; \ + uint16x4_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \ + uint16x4_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 3, 2, 1, 0); \ + __ret_310 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_310, __p3_310), __rev0_310, __p1_310); \ + __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \ + __ret_310; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s8(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ + int8x8_t __ret_311; \ + int8x8_t __s0_311 = __p0_311; \ + int8x8_t __s2_311 = __p2_311; \ + __ret_311 = vset_lane_s8(vget_lane_s8(__s2_311, __p3_311), __s0_311, __p1_311); \ + __ret_311; \ +}) +#else +#define vcopy_lane_s8(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ + int8x8_t __ret_312; \ + int8x8_t __s0_312 = __p0_312; \ + int8x8_t __s2_312 = __p2_312; \ + int8x8_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_312, __p3_312), __rev0_312, __p1_312); \ + __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312; \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_f32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ + float32x2_t __ret_313; \ + float32x2_t __s0_313 = __p0_313; \ + float32x2_t __s2_313 = __p2_313; \ + __ret_313 = vset_lane_f32(vget_lane_f32(__s2_313, __p3_313), __s0_313, __p1_313); \ + __ret_313; \ +}) +#else +#define vcopy_lane_f32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ + float32x2_t __ret_314; \ + float32x2_t __s0_314 = __p0_314; \ + float32x2_t __s2_314 = __p2_314; \ + float32x2_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \ + float32x2_t __rev2_314; __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \ + __ret_314 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_314, __p3_314), __rev0_314, __p1_314); \ + __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \ + __ret_314; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s32(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ + int32x2_t __ret_315; \ + int32x2_t __s0_315 = __p0_315; \ + int32x2_t __s2_315 = __p2_315; \ + __ret_315 = vset_lane_s32(vget_lane_s32(__s2_315, __p3_315), __s0_315, __p1_315); \ + __ret_315; \ +}) +#else +#define vcopy_lane_s32(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ + int32x2_t __ret_316; \ + int32x2_t __s0_316 = __p0_316; \ + int32x2_t __s2_316 = __p2_316; \ + int32x2_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 1, 0); \ + int32x2_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 1, 0); \ + __ret_316 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_316, __p3_316), __rev0_316, __p1_316); \ + __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 1, 0); \ + __ret_316; \ +}) +#endif + +#define vcopy_lane_s64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ + int64x1_t __ret_317; \ + int64x1_t __s0_317 = __p0_317; \ + int64x1_t __s2_317 = __p2_317; \ + __ret_317 = vset_lane_s64(vget_lane_s64(__s2_317, __p3_317), __s0_317, __p1_317); \ + __ret_317; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_s16(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ + int16x4_t __ret_318; \ + int16x4_t __s0_318 = __p0_318; \ + int16x4_t __s2_318 = __p2_318; \ + __ret_318 = vset_lane_s16(vget_lane_s16(__s2_318, __p3_318), __s0_318, __p1_318); \ + __ret_318; \ +}) +#else +#define vcopy_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ + int16x4_t __ret_319; \ + int16x4_t __s0_319 = __p0_319; \ + int16x4_t __s2_319 = __p2_319; \ + int16x4_t __rev0_319; __rev0_319 = __builtin_shufflevector(__s0_319, __s0_319, 3, 2, 1, 0); \ + int16x4_t __rev2_319; __rev2_319 = __builtin_shufflevector(__s2_319, __s2_319, 3, 2, 1, 0); \ + __ret_319 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_319, __p3_319), __rev0_319, __p1_319); \ + __ret_319 = __builtin_shufflevector(__ret_319, __ret_319, 3, 2, 1, 0); \ + __ret_319; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p8(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ + poly8x16_t __ret_320; \ + poly8x16_t __s0_320 = __p0_320; \ + poly8x16_t __s2_320 = __p2_320; \ + __ret_320 = vsetq_lane_p8(vgetq_lane_p8(__s2_320, __p3_320), __s0_320, __p1_320); \ + __ret_320; \ +}) +#else +#define vcopyq_laneq_p8(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ + poly8x16_t __ret_321; \ + poly8x16_t __s0_321 = __p0_321; \ + poly8x16_t __s2_321 = __p2_321; \ + poly8x16_t __rev0_321; __rev0_321 = __builtin_shufflevector(__s0_321, __s0_321, 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_321; __rev2_321 = __builtin_shufflevector(__s2_321, __s2_321, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_321 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_321, __p3_321), __rev0_321, __p1_321); \ + __ret_321 = __builtin_shufflevector(__ret_321, __ret_321, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_321; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p16(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ + poly16x8_t __ret_322; \ + poly16x8_t __s0_322 = __p0_322; \ + poly16x8_t __s2_322 = __p2_322; \ + __ret_322 = vsetq_lane_p16(vgetq_lane_p16(__s2_322, __p3_322), __s0_322, __p1_322); \ + __ret_322; \ +}) +#else +#define vcopyq_laneq_p16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ + poly16x8_t __ret_323; \ + poly16x8_t __s0_323 = __p0_323; \ + poly16x8_t __s2_323 = __p2_323; \ + poly16x8_t __rev0_323; __rev0_323 = __builtin_shufflevector(__s0_323, __s0_323, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev2_323; __rev2_323 = __builtin_shufflevector(__s2_323, __s2_323, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_323 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_323, __p3_323), __rev0_323, __p1_323); \ + __ret_323 = __builtin_shufflevector(__ret_323, __ret_323, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_323; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u8(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ + uint8x16_t __ret_324; \ + uint8x16_t __s0_324 = __p0_324; \ + uint8x16_t __s2_324 = __p2_324; \ + __ret_324 = vsetq_lane_u8(vgetq_lane_u8(__s2_324, __p3_324), __s0_324, __p1_324); \ + __ret_324; \ +}) +#else +#define vcopyq_laneq_u8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ + uint8x16_t __ret_325; \ + uint8x16_t __s0_325 = __p0_325; \ + uint8x16_t __s2_325 = __p2_325; \ + uint8x16_t __rev0_325; __rev0_325 = __builtin_shufflevector(__s0_325, __s0_325, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_325; __rev2_325 = __builtin_shufflevector(__s2_325, __s2_325, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_325 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_325, __p3_325), __rev0_325, __p1_325); \ + __ret_325 = __builtin_shufflevector(__ret_325, __ret_325, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_325; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ + uint32x4_t __ret_326; \ + uint32x4_t __s0_326 = __p0_326; \ + uint32x4_t __s2_326 = __p2_326; \ + __ret_326 = vsetq_lane_u32(vgetq_lane_u32(__s2_326, __p3_326), __s0_326, __p1_326); \ + __ret_326; \ +}) +#else +#define vcopyq_laneq_u32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ + uint32x4_t __ret_327; \ + uint32x4_t __s0_327 = __p0_327; \ + uint32x4_t __s2_327 = __p2_327; \ + uint32x4_t __rev0_327; __rev0_327 = __builtin_shufflevector(__s0_327, __s0_327, 3, 2, 1, 0); \ + uint32x4_t __rev2_327; __rev2_327 = __builtin_shufflevector(__s2_327, __s2_327, 3, 2, 1, 0); \ + __ret_327 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_327, __p3_327), __rev0_327, __p1_327); \ + __ret_327 = __builtin_shufflevector(__ret_327, __ret_327, 3, 2, 1, 0); \ + __ret_327; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u64(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ + uint64x2_t __ret_328; \ + uint64x2_t __s0_328 = __p0_328; \ + uint64x2_t __s2_328 = __p2_328; \ + 
__ret_328 = vsetq_lane_u64(vgetq_lane_u64(__s2_328, __p3_328), __s0_328, __p1_328); \ + __ret_328; \ +}) +#else +#define vcopyq_laneq_u64(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ + uint64x2_t __ret_329; \ + uint64x2_t __s0_329 = __p0_329; \ + uint64x2_t __s2_329 = __p2_329; \ + uint64x2_t __rev0_329; __rev0_329 = __builtin_shufflevector(__s0_329, __s0_329, 1, 0); \ + uint64x2_t __rev2_329; __rev2_329 = __builtin_shufflevector(__s2_329, __s2_329, 1, 0); \ + __ret_329 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_329, __p3_329), __rev0_329, __p1_329); \ + __ret_329 = __builtin_shufflevector(__ret_329, __ret_329, 1, 0); \ + __ret_329; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_u16(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \ + uint16x8_t __ret_330; \ + uint16x8_t __s0_330 = __p0_330; \ + uint16x8_t __s2_330 = __p2_330; \ + __ret_330 = vsetq_lane_u16(vgetq_lane_u16(__s2_330, __p3_330), __s0_330, __p1_330); \ + __ret_330; \ +}) +#else +#define vcopyq_laneq_u16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \ + uint16x8_t __ret_331; \ + uint16x8_t __s0_331 = __p0_331; \ + uint16x8_t __s2_331 = __p2_331; \ + uint16x8_t __rev0_331; __rev0_331 = __builtin_shufflevector(__s0_331, __s0_331, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_331; __rev2_331 = __builtin_shufflevector(__s2_331, __s2_331, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_331 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_331, __p3_331), __rev0_331, __p1_331); \ + __ret_331 = __builtin_shufflevector(__ret_331, __ret_331, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_331; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s8(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ + int8x16_t __ret_332; \ + int8x16_t __s0_332 = __p0_332; \ + int8x16_t __s2_332 = __p2_332; \ + __ret_332 = vsetq_lane_s8(vgetq_lane_s8(__s2_332, __p3_332), __s0_332, __p1_332); \ + __ret_332; \ +}) +#else +#define vcopyq_laneq_s8(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ + int8x16_t __ret_333; \ + int8x16_t __s0_333 = __p0_333; \ + int8x16_t __s2_333 = __p2_333; \ + int8x16_t __rev0_333; __rev0_333 = __builtin_shufflevector(__s0_333, __s0_333, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_333; __rev2_333 = __builtin_shufflevector(__s2_333, __s2_333, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_333 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_333, __p3_333), __rev0_333, __p1_333); \ + __ret_333 = __builtin_shufflevector(__ret_333, __ret_333, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_333; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_f32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ + float32x4_t __ret_334; \ + float32x4_t __s0_334 = __p0_334; \ + float32x4_t __s2_334 = __p2_334; \ + __ret_334 = vsetq_lane_f32(vgetq_lane_f32(__s2_334, __p3_334), __s0_334, __p1_334); \ + __ret_334; \ +}) +#else +#define vcopyq_laneq_f32(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ + float32x4_t __ret_335; \ + float32x4_t __s0_335 = __p0_335; \ + float32x4_t __s2_335 = __p2_335; \ + float32x4_t __rev0_335; __rev0_335 = __builtin_shufflevector(__s0_335, __s0_335, 3, 2, 1, 0); \ + float32x4_t __rev2_335; __rev2_335 = __builtin_shufflevector(__s2_335, __s2_335, 3, 2, 1, 0); \ + __ret_335 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_335, __p3_335), __rev0_335, __p1_335); \ + __ret_335 = __builtin_shufflevector(__ret_335, __ret_335, 3, 2, 1, 0); \ 
+ __ret_335; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s32(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ + int32x4_t __ret_336; \ + int32x4_t __s0_336 = __p0_336; \ + int32x4_t __s2_336 = __p2_336; \ + __ret_336 = vsetq_lane_s32(vgetq_lane_s32(__s2_336, __p3_336), __s0_336, __p1_336); \ + __ret_336; \ +}) +#else +#define vcopyq_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ + int32x4_t __ret_337; \ + int32x4_t __s0_337 = __p0_337; \ + int32x4_t __s2_337 = __p2_337; \ + int32x4_t __rev0_337; __rev0_337 = __builtin_shufflevector(__s0_337, __s0_337, 3, 2, 1, 0); \ + int32x4_t __rev2_337; __rev2_337 = __builtin_shufflevector(__s2_337, __s2_337, 3, 2, 1, 0); \ + __ret_337 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_337, __p3_337), __rev0_337, __p1_337); \ + __ret_337 = __builtin_shufflevector(__ret_337, __ret_337, 3, 2, 1, 0); \ + __ret_337; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s64(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ + int64x2_t __ret_338; \ + int64x2_t __s0_338 = __p0_338; \ + int64x2_t __s2_338 = __p2_338; \ + __ret_338 = vsetq_lane_s64(vgetq_lane_s64(__s2_338, __p3_338), __s0_338, __p1_338); \ + __ret_338; \ +}) +#else +#define vcopyq_laneq_s64(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \ + int64x2_t __ret_339; \ + int64x2_t __s0_339 = __p0_339; \ + int64x2_t __s2_339 = __p2_339; \ + int64x2_t __rev0_339; __rev0_339 = __builtin_shufflevector(__s0_339, __s0_339, 1, 0); \ + int64x2_t __rev2_339; __rev2_339 = __builtin_shufflevector(__s2_339, __s2_339, 1, 0); \ + __ret_339 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_339, __p3_339), __rev0_339, __p1_339); \ + __ret_339 = __builtin_shufflevector(__ret_339, __ret_339, 1, 0); \ + __ret_339; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ + int16x8_t __ret_340; \ + int16x8_t __s0_340 = __p0_340; \ + int16x8_t __s2_340 = __p2_340; \ + __ret_340 = vsetq_lane_s16(vgetq_lane_s16(__s2_340, __p3_340), __s0_340, __p1_340); \ + __ret_340; \ +}) +#else +#define vcopyq_laneq_s16(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \ + int16x8_t __ret_341; \ + int16x8_t __s0_341 = __p0_341; \ + int16x8_t __s2_341 = __p2_341; \ + int16x8_t __rev0_341; __rev0_341 = __builtin_shufflevector(__s0_341, __s0_341, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_341; __rev2_341 = __builtin_shufflevector(__s2_341, __s2_341, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_341 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_341, __p3_341), __rev0_341, __p1_341); \ + __ret_341 = __builtin_shufflevector(__ret_341, __ret_341, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_341; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \ + poly8x8_t __ret_342; \ + poly8x8_t __s0_342 = __p0_342; \ + poly8x16_t __s2_342 = __p2_342; \ + __ret_342 = vset_lane_p8(vgetq_lane_p8(__s2_342, __p3_342), __s0_342, __p1_342); \ + __ret_342; \ +}) +#else +#define vcopy_laneq_p8(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \ + poly8x8_t __ret_343; \ + poly8x8_t __s0_343 = __p0_343; \ + poly8x16_t __s2_343 = __p2_343; \ + poly8x8_t __rev0_343; __rev0_343 = __builtin_shufflevector(__s0_343, __s0_343, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_343; __rev2_343 = __builtin_shufflevector(__s2_343, __s2_343, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_343 = 
__noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_343, __p3_343), __rev0_343, __p1_343); \ + __ret_343 = __builtin_shufflevector(__ret_343, __ret_343, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_343; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \ + poly16x4_t __ret_344; \ + poly16x4_t __s0_344 = __p0_344; \ + poly16x8_t __s2_344 = __p2_344; \ + __ret_344 = vset_lane_p16(vgetq_lane_p16(__s2_344, __p3_344), __s0_344, __p1_344); \ + __ret_344; \ +}) +#else +#define vcopy_laneq_p16(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \ + poly16x4_t __ret_345; \ + poly16x4_t __s0_345 = __p0_345; \ + poly16x8_t __s2_345 = __p2_345; \ + poly16x4_t __rev0_345; __rev0_345 = __builtin_shufflevector(__s0_345, __s0_345, 3, 2, 1, 0); \ + poly16x8_t __rev2_345; __rev2_345 = __builtin_shufflevector(__s2_345, __s2_345, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_345 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_345, __p3_345), __rev0_345, __p1_345); \ + __ret_345 = __builtin_shufflevector(__ret_345, __ret_345, 3, 2, 1, 0); \ + __ret_345; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \ + uint8x8_t __ret_346; \ + uint8x8_t __s0_346 = __p0_346; \ + uint8x16_t __s2_346 = __p2_346; \ + __ret_346 = vset_lane_u8(vgetq_lane_u8(__s2_346, __p3_346), __s0_346, __p1_346); \ + __ret_346; \ +}) +#else +#define vcopy_laneq_u8(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \ + uint8x8_t __ret_347; \ + uint8x8_t __s0_347 = __p0_347; \ + uint8x16_t __s2_347 = __p2_347; \ + uint8x8_t __rev0_347; __rev0_347 = __builtin_shufflevector(__s0_347, __s0_347, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_347; __rev2_347 = __builtin_shufflevector(__s2_347, __s2_347, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_347 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_347, __p3_347), __rev0_347, __p1_347); \ + __ret_347 = __builtin_shufflevector(__ret_347, __ret_347, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_347; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \ + uint32x2_t __ret_348; \ + uint32x2_t __s0_348 = __p0_348; \ + uint32x4_t __s2_348 = __p2_348; \ + __ret_348 = vset_lane_u32(vgetq_lane_u32(__s2_348, __p3_348), __s0_348, __p1_348); \ + __ret_348; \ +}) +#else +#define vcopy_laneq_u32(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \ + uint32x2_t __ret_349; \ + uint32x2_t __s0_349 = __p0_349; \ + uint32x4_t __s2_349 = __p2_349; \ + uint32x2_t __rev0_349; __rev0_349 = __builtin_shufflevector(__s0_349, __s0_349, 1, 0); \ + uint32x4_t __rev2_349; __rev2_349 = __builtin_shufflevector(__s2_349, __s2_349, 3, 2, 1, 0); \ + __ret_349 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_349, __p3_349), __rev0_349, __p1_349); \ + __ret_349 = __builtin_shufflevector(__ret_349, __ret_349, 1, 0); \ + __ret_349; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \ + uint64x1_t __ret_350; \ + uint64x1_t __s0_350 = __p0_350; \ + uint64x2_t __s2_350 = __p2_350; \ + __ret_350 = vset_lane_u64(vgetq_lane_u64(__s2_350, __p3_350), __s0_350, __p1_350); \ + __ret_350; \ +}) +#else +#define vcopy_laneq_u64(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \ + uint64x1_t __ret_351; \ + uint64x1_t __s0_351 = __p0_351; \ + uint64x2_t __s2_351 = __p2_351; \ + uint64x2_t __rev2_351; __rev2_351 = 
__builtin_shufflevector(__s2_351, __s2_351, 1, 0); \ + __ret_351 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_351, __p3_351), __s0_351, __p1_351); \ + __ret_351; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \ + uint16x4_t __ret_352; \ + uint16x4_t __s0_352 = __p0_352; \ + uint16x8_t __s2_352 = __p2_352; \ + __ret_352 = vset_lane_u16(vgetq_lane_u16(__s2_352, __p3_352), __s0_352, __p1_352); \ + __ret_352; \ +}) +#else +#define vcopy_laneq_u16(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \ + uint16x4_t __ret_353; \ + uint16x4_t __s0_353 = __p0_353; \ + uint16x8_t __s2_353 = __p2_353; \ + uint16x4_t __rev0_353; __rev0_353 = __builtin_shufflevector(__s0_353, __s0_353, 3, 2, 1, 0); \ + uint16x8_t __rev2_353; __rev2_353 = __builtin_shufflevector(__s2_353, __s2_353, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_353 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_353, __p3_353), __rev0_353, __p1_353); \ + __ret_353 = __builtin_shufflevector(__ret_353, __ret_353, 3, 2, 1, 0); \ + __ret_353; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \ + int8x8_t __ret_354; \ + int8x8_t __s0_354 = __p0_354; \ + int8x16_t __s2_354 = __p2_354; \ + __ret_354 = vset_lane_s8(vgetq_lane_s8(__s2_354, __p3_354), __s0_354, __p1_354); \ + __ret_354; \ +}) +#else +#define vcopy_laneq_s8(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \ + int8x8_t __ret_355; \ + int8x8_t __s0_355 = __p0_355; \ + int8x16_t __s2_355 = __p2_355; \ + int8x8_t __rev0_355; __rev0_355 = __builtin_shufflevector(__s0_355, __s0_355, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_355; __rev2_355 = __builtin_shufflevector(__s2_355, __s2_355, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_355 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_355, __p3_355), __rev0_355, __p1_355); \ + __ret_355 = __builtin_shufflevector(__ret_355, __ret_355, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_355; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \ + float32x2_t __ret_356; \ + float32x2_t __s0_356 = __p0_356; \ + float32x4_t __s2_356 = __p2_356; \ + __ret_356 = vset_lane_f32(vgetq_lane_f32(__s2_356, __p3_356), __s0_356, __p1_356); \ + __ret_356; \ +}) +#else +#define vcopy_laneq_f32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \ + float32x2_t __ret_357; \ + float32x2_t __s0_357 = __p0_357; \ + float32x4_t __s2_357 = __p2_357; \ + float32x2_t __rev0_357; __rev0_357 = __builtin_shufflevector(__s0_357, __s0_357, 1, 0); \ + float32x4_t __rev2_357; __rev2_357 = __builtin_shufflevector(__s2_357, __s2_357, 3, 2, 1, 0); \ + __ret_357 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_357, __p3_357), __rev0_357, __p1_357); \ + __ret_357 = __builtin_shufflevector(__ret_357, __ret_357, 1, 0); \ + __ret_357; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \ + int32x2_t __ret_358; \ + int32x2_t __s0_358 = __p0_358; \ + int32x4_t __s2_358 = __p2_358; \ + __ret_358 = vset_lane_s32(vgetq_lane_s32(__s2_358, __p3_358), __s0_358, __p1_358); \ + __ret_358; \ +}) +#else +#define vcopy_laneq_s32(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \ + int32x2_t __ret_359; \ + int32x2_t __s0_359 = __p0_359; \ + int32x4_t __s2_359 = __p2_359; \ + int32x2_t __rev0_359; __rev0_359 = __builtin_shufflevector(__s0_359, __s0_359, 1, 0); \ + 
int32x4_t __rev2_359; __rev2_359 = __builtin_shufflevector(__s2_359, __s2_359, 3, 2, 1, 0); \ + __ret_359 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_359, __p3_359), __rev0_359, __p1_359); \ + __ret_359 = __builtin_shufflevector(__ret_359, __ret_359, 1, 0); \ + __ret_359; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \ + int64x1_t __ret_360; \ + int64x1_t __s0_360 = __p0_360; \ + int64x2_t __s2_360 = __p2_360; \ + __ret_360 = vset_lane_s64(vgetq_lane_s64(__s2_360, __p3_360), __s0_360, __p1_360); \ + __ret_360; \ +}) +#else +#define vcopy_laneq_s64(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \ + int64x1_t __ret_361; \ + int64x1_t __s0_361 = __p0_361; \ + int64x2_t __s2_361 = __p2_361; \ + int64x2_t __rev2_361; __rev2_361 = __builtin_shufflevector(__s2_361, __s2_361, 1, 0); \ + __ret_361 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_361, __p3_361), __s0_361, __p1_361); \ + __ret_361; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \ + int16x4_t __ret_362; \ + int16x4_t __s0_362 = __p0_362; \ + int16x8_t __s2_362 = __p2_362; \ + __ret_362 = vset_lane_s16(vgetq_lane_s16(__s2_362, __p3_362), __s0_362, __p1_362); \ + __ret_362; \ +}) +#else +#define vcopy_laneq_s16(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \ + int16x4_t __ret_363; \ + int16x4_t __s0_363 = __p0_363; \ + int16x8_t __s2_363 = __p2_363; \ + int16x4_t __rev0_363; __rev0_363 = __builtin_shufflevector(__s0_363, __s0_363, 3, 2, 1, 0); \ + int16x8_t __rev2_363; __rev2_363 = __builtin_shufflevector(__s2_363, __s2_363, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_363 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_363, __p3_363), __rev0_363, __p1_363); \ + __ret_363 = __builtin_shufflevector(__ret_363, __ret_363, 3, 2, 1, 0); \ + __ret_363; \ +}) +#endif + +#define vcreate_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly64x1_t)(__promote); \ + __ret; \ +}) +#define vcreate_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float64x1_t)(__promote); \ + __ret; \ +}) +__ai __attribute__((target("neon"))) float32_t vcvts_f32_s32(int32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vcvts_f32_u32(uint32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64_t vcvtd_f64_s64(int64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) float64_t vcvtd_f64_u64(uint64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vcvt_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vcvt_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { + float16x8_t __ret; + __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { + float16x8_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_f16(vget_high_f16(__p0)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = vcvt_f64_f32(vget_high_f32(__p0)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \ + __ret; \ +}) +#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \ + __ret; \ +}) +#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \ + 
float64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \ + __ret; \ +}) +#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + float32_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) +#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + float64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ + __ret; \ +}) +#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + float32_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + float64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ + __ret; \ +}) +__ai __attribute__((target("neon"))) int32_t vcvts_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vcvtd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vcvt_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) 
uint32_t vcvts_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcvt_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vcvtas_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vcvta_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vcvtad_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcvtas_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcvta_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtad_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vcvtms_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + 
int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vcvtm_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vcvtmd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcvtms_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtmd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vcvtns_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vcvtn_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vcvtnd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcvtns_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtnd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vcvtps_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vcvtp_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vcvtpd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcvtps_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtpd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vcvtxd_f32_f64(float64_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) 
float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 / __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t 
__ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#define vdup_lane_p64(__p0_364, __p1_364) __extension__ ({ \ + poly64x1_t __ret_364; \ + poly64x1_t __s0_364 = __p0_364; \ + __ret_364 = splat_lane_p64(__s0_364, __p1_364); \ + __ret_364; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_p64(__p0_365, __p1_365) __extension__ ({ \ + poly64x2_t __ret_365; \ + poly64x1_t __s0_365 = __p0_365; \ + __ret_365 = splatq_lane_p64(__s0_365, __p1_365); \ + __ret_365; \ +}) +#else +#define vdupq_lane_p64(__p0_366, __p1_366) __extension__ ({ \ + poly64x2_t __ret_366; \ + poly64x1_t __s0_366 = __p0_366; \ + __ret_366 = __noswap_splatq_lane_p64(__s0_366, __p1_366); \ + __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \ + __ret_366; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_lane_f64(__p0_367, __p1_367) __extension__ ({ \ + float64x2_t __ret_367; \ + float64x1_t __s0_367 = __p0_367; \ + __ret_367 = splatq_lane_f64(__s0_367, __p1_367); \ + __ret_367; \ +}) +#else +#define vdupq_lane_f64(__p0_368, __p1_368) __extension__ ({ \ + float64x2_t __ret_368; \ + float64x1_t __s0_368 = __p0_368; \ + __ret_368 = __noswap_splatq_lane_f64(__s0_368, __p1_368); \ + __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 1, 0); \ + __ret_368; \ +}) +#endif + +#define vdup_lane_f64(__p0_369, __p1_369) __extension__ ({ \ + float64x1_t __ret_369; \ + float64x1_t __s0_369 = __p0_369; \ + __ret_369 = splat_lane_f64(__s0_369, __p1_369); \ + __ret_369; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16_t) 
__builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \ + 
__ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p8(__p0_370, __p1_370) __extension__ ({ \ + poly8x8_t __ret_370; \ + poly8x16_t __s0_370 = __p0_370; \ + __ret_370 = splat_laneq_p8(__s0_370, __p1_370); \ + __ret_370; \ +}) +#else +#define vdup_laneq_p8(__p0_371, __p1_371) __extension__ ({ \ + poly8x8_t __ret_371; \ + poly8x16_t __s0_371 = __p0_371; \ + poly8x16_t __rev0_371; __rev0_371 = __builtin_shufflevector(__s0_371, __s0_371, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_371 = __noswap_splat_laneq_p8(__rev0_371, __p1_371); \ + __ret_371 = __builtin_shufflevector(__ret_371, __ret_371, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_371; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p64(__p0_372, __p1_372) __extension__ ({ \ + poly64x1_t __ret_372; \ + poly64x2_t __s0_372 = __p0_372; \ + __ret_372 = splat_laneq_p64(__s0_372, __p1_372); \ + __ret_372; \ +}) +#else +#define 
vdup_laneq_p64(__p0_373, __p1_373) __extension__ ({ \ + poly64x1_t __ret_373; \ + poly64x2_t __s0_373 = __p0_373; \ + poly64x2_t __rev0_373; __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 1, 0); \ + __ret_373 = __noswap_splat_laneq_p64(__rev0_373, __p1_373); \ + __ret_373; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p16(__p0_374, __p1_374) __extension__ ({ \ + poly16x4_t __ret_374; \ + poly16x8_t __s0_374 = __p0_374; \ + __ret_374 = splat_laneq_p16(__s0_374, __p1_374); \ + __ret_374; \ +}) +#else +#define vdup_laneq_p16(__p0_375, __p1_375) __extension__ ({ \ + poly16x4_t __ret_375; \ + poly16x8_t __s0_375 = __p0_375; \ + poly16x8_t __rev0_375; __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_375 = __noswap_splat_laneq_p16(__rev0_375, __p1_375); \ + __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 3, 2, 1, 0); \ + __ret_375; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p8(__p0_376, __p1_376) __extension__ ({ \ + poly8x16_t __ret_376; \ + poly8x16_t __s0_376 = __p0_376; \ + __ret_376 = splatq_laneq_p8(__s0_376, __p1_376); \ + __ret_376; \ +}) +#else +#define vdupq_laneq_p8(__p0_377, __p1_377) __extension__ ({ \ + poly8x16_t __ret_377; \ + poly8x16_t __s0_377 = __p0_377; \ + poly8x16_t __rev0_377; __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_377 = __noswap_splatq_laneq_p8(__rev0_377, __p1_377); \ + __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_377; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p64(__p0_378, __p1_378) __extension__ ({ \ + poly64x2_t __ret_378; \ + poly64x2_t __s0_378 = __p0_378; \ + __ret_378 = splatq_laneq_p64(__s0_378, __p1_378); \ + __ret_378; \ +}) +#else +#define vdupq_laneq_p64(__p0_379, __p1_379) __extension__ ({ \ + poly64x2_t __ret_379; \ + poly64x2_t __s0_379 = __p0_379; \ + poly64x2_t __rev0_379; __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \ + __ret_379 = __noswap_splatq_laneq_p64(__rev0_379, __p1_379); \ + __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \ + __ret_379; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_p16(__p0_380, __p1_380) __extension__ ({ \ + poly16x8_t __ret_380; \ + poly16x8_t __s0_380 = __p0_380; \ + __ret_380 = splatq_laneq_p16(__s0_380, __p1_380); \ + __ret_380; \ +}) +#else +#define vdupq_laneq_p16(__p0_381, __p1_381) __extension__ ({ \ + poly16x8_t __ret_381; \ + poly16x8_t __s0_381 = __p0_381; \ + poly16x8_t __rev0_381; __rev0_381 = __builtin_shufflevector(__s0_381, __s0_381, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_381 = __noswap_splatq_laneq_p16(__rev0_381, __p1_381); \ + __ret_381 = __builtin_shufflevector(__ret_381, __ret_381, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_381; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u8(__p0_382, __p1_382) __extension__ ({ \ + uint8x16_t __ret_382; \ + uint8x16_t __s0_382 = __p0_382; \ + __ret_382 = splatq_laneq_u8(__s0_382, __p1_382); \ + __ret_382; \ +}) +#else +#define vdupq_laneq_u8(__p0_383, __p1_383) __extension__ ({ \ + uint8x16_t __ret_383; \ + uint8x16_t __s0_383 = __p0_383; \ + uint8x16_t __rev0_383; __rev0_383 = __builtin_shufflevector(__s0_383, __s0_383, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_383 = __noswap_splatq_laneq_u8(__rev0_383, __p1_383); \ + __ret_383 = __builtin_shufflevector(__ret_383, __ret_383, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); \ + __ret_383; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u32(__p0_384, __p1_384) __extension__ ({ \ + uint32x4_t __ret_384; \ + uint32x4_t __s0_384 = __p0_384; \ + __ret_384 = splatq_laneq_u32(__s0_384, __p1_384); \ + __ret_384; \ +}) +#else +#define vdupq_laneq_u32(__p0_385, __p1_385) __extension__ ({ \ + uint32x4_t __ret_385; \ + uint32x4_t __s0_385 = __p0_385; \ + uint32x4_t __rev0_385; __rev0_385 = __builtin_shufflevector(__s0_385, __s0_385, 3, 2, 1, 0); \ + __ret_385 = __noswap_splatq_laneq_u32(__rev0_385, __p1_385); \ + __ret_385 = __builtin_shufflevector(__ret_385, __ret_385, 3, 2, 1, 0); \ + __ret_385; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u64(__p0_386, __p1_386) __extension__ ({ \ + uint64x2_t __ret_386; \ + uint64x2_t __s0_386 = __p0_386; \ + __ret_386 = splatq_laneq_u64(__s0_386, __p1_386); \ + __ret_386; \ +}) +#else +#define vdupq_laneq_u64(__p0_387, __p1_387) __extension__ ({ \ + uint64x2_t __ret_387; \ + uint64x2_t __s0_387 = __p0_387; \ + uint64x2_t __rev0_387; __rev0_387 = __builtin_shufflevector(__s0_387, __s0_387, 1, 0); \ + __ret_387 = __noswap_splatq_laneq_u64(__rev0_387, __p1_387); \ + __ret_387 = __builtin_shufflevector(__ret_387, __ret_387, 1, 0); \ + __ret_387; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_u16(__p0_388, __p1_388) __extension__ ({ \ + uint16x8_t __ret_388; \ + uint16x8_t __s0_388 = __p0_388; \ + __ret_388 = splatq_laneq_u16(__s0_388, __p1_388); \ + __ret_388; \ +}) +#else +#define vdupq_laneq_u16(__p0_389, __p1_389) __extension__ ({ \ + uint16x8_t __ret_389; \ + uint16x8_t __s0_389 = __p0_389; \ + uint16x8_t __rev0_389; __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_389 = __noswap_splatq_laneq_u16(__rev0_389, __p1_389); \ + __ret_389 = __builtin_shufflevector(__ret_389, __ret_389, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_389; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s8(__p0_390, __p1_390) __extension__ ({ \ + int8x16_t __ret_390; \ + int8x16_t __s0_390 = __p0_390; \ + __ret_390 = splatq_laneq_s8(__s0_390, __p1_390); \ + __ret_390; \ +}) +#else +#define vdupq_laneq_s8(__p0_391, __p1_391) __extension__ ({ \ + int8x16_t __ret_391; \ + int8x16_t __s0_391 = __p0_391; \ + int8x16_t __rev0_391; __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_391 = __noswap_splatq_laneq_s8(__rev0_391, __p1_391); \ + __ret_391 = __builtin_shufflevector(__ret_391, __ret_391, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_391; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f64(__p0_392, __p1_392) __extension__ ({ \ + float64x2_t __ret_392; \ + float64x2_t __s0_392 = __p0_392; \ + __ret_392 = splatq_laneq_f64(__s0_392, __p1_392); \ + __ret_392; \ +}) +#else +#define vdupq_laneq_f64(__p0_393, __p1_393) __extension__ ({ \ + float64x2_t __ret_393; \ + float64x2_t __s0_393 = __p0_393; \ + float64x2_t __rev0_393; __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, 1, 0); \ + __ret_393 = __noswap_splatq_laneq_f64(__rev0_393, __p1_393); \ + __ret_393 = __builtin_shufflevector(__ret_393, __ret_393, 1, 0); \ + __ret_393; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f32(__p0_394, __p1_394) __extension__ ({ \ + float32x4_t __ret_394; \ + float32x4_t __s0_394 = __p0_394; \ + __ret_394 = splatq_laneq_f32(__s0_394, __p1_394); \ + __ret_394; \ +}) +#else +#define vdupq_laneq_f32(__p0_395, __p1_395) __extension__ ({ \ + 
float32x4_t __ret_395; \ + float32x4_t __s0_395 = __p0_395; \ + float32x4_t __rev0_395; __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, 3, 2, 1, 0); \ + __ret_395 = __noswap_splatq_laneq_f32(__rev0_395, __p1_395); \ + __ret_395 = __builtin_shufflevector(__ret_395, __ret_395, 3, 2, 1, 0); \ + __ret_395; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_f16(__p0_396, __p1_396) __extension__ ({ \ + float16x8_t __ret_396; \ + float16x8_t __s0_396 = __p0_396; \ + __ret_396 = splatq_laneq_f16(__s0_396, __p1_396); \ + __ret_396; \ +}) +#else +#define vdupq_laneq_f16(__p0_397, __p1_397) __extension__ ({ \ + float16x8_t __ret_397; \ + float16x8_t __s0_397 = __p0_397; \ + float16x8_t __rev0_397; __rev0_397 = __builtin_shufflevector(__s0_397, __s0_397, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_397 = __noswap_splatq_laneq_f16(__rev0_397, __p1_397); \ + __ret_397 = __builtin_shufflevector(__ret_397, __ret_397, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_397; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s32(__p0_398, __p1_398) __extension__ ({ \ + int32x4_t __ret_398; \ + int32x4_t __s0_398 = __p0_398; \ + __ret_398 = splatq_laneq_s32(__s0_398, __p1_398); \ + __ret_398; \ +}) +#else +#define vdupq_laneq_s32(__p0_399, __p1_399) __extension__ ({ \ + int32x4_t __ret_399; \ + int32x4_t __s0_399 = __p0_399; \ + int32x4_t __rev0_399; __rev0_399 = __builtin_shufflevector(__s0_399, __s0_399, 3, 2, 1, 0); \ + __ret_399 = __noswap_splatq_laneq_s32(__rev0_399, __p1_399); \ + __ret_399 = __builtin_shufflevector(__ret_399, __ret_399, 3, 2, 1, 0); \ + __ret_399; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s64(__p0_400, __p1_400) __extension__ ({ \ + int64x2_t __ret_400; \ + int64x2_t __s0_400 = __p0_400; \ + __ret_400 = splatq_laneq_s64(__s0_400, __p1_400); \ + __ret_400; \ +}) +#else +#define vdupq_laneq_s64(__p0_401, __p1_401) __extension__ ({ \ + int64x2_t __ret_401; \ + int64x2_t __s0_401 = __p0_401; \ + int64x2_t __rev0_401; __rev0_401 = __builtin_shufflevector(__s0_401, __s0_401, 1, 0); \ + __ret_401 = __noswap_splatq_laneq_s64(__rev0_401, __p1_401); \ + __ret_401 = __builtin_shufflevector(__ret_401, __ret_401, 1, 0); \ + __ret_401; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s16(__p0_402, __p1_402) __extension__ ({ \ + int16x8_t __ret_402; \ + int16x8_t __s0_402 = __p0_402; \ + __ret_402 = splatq_laneq_s16(__s0_402, __p1_402); \ + __ret_402; \ +}) +#else +#define vdupq_laneq_s16(__p0_403, __p1_403) __extension__ ({ \ + int16x8_t __ret_403; \ + int16x8_t __s0_403 = __p0_403; \ + int16x8_t __rev0_403; __rev0_403 = __builtin_shufflevector(__s0_403, __s0_403, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_403 = __noswap_splatq_laneq_s16(__rev0_403, __p1_403); \ + __ret_403 = __builtin_shufflevector(__ret_403, __ret_403, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_403; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u8(__p0_404, __p1_404) __extension__ ({ \ + uint8x8_t __ret_404; \ + uint8x16_t __s0_404 = __p0_404; \ + __ret_404 = splat_laneq_u8(__s0_404, __p1_404); \ + __ret_404; \ +}) +#else +#define vdup_laneq_u8(__p0_405, __p1_405) __extension__ ({ \ + uint8x8_t __ret_405; \ + uint8x16_t __s0_405 = __p0_405; \ + uint8x16_t __rev0_405; __rev0_405 = __builtin_shufflevector(__s0_405, __s0_405, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_405 = __noswap_splat_laneq_u8(__rev0_405, __p1_405); \ + __ret_405 = __builtin_shufflevector(__ret_405, __ret_405, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_405; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vdup_laneq_u32(__p0_406, __p1_406) __extension__ ({ \ + uint32x2_t __ret_406; \ + uint32x4_t __s0_406 = __p0_406; \ + __ret_406 = splat_laneq_u32(__s0_406, __p1_406); \ + __ret_406; \ +}) +#else +#define vdup_laneq_u32(__p0_407, __p1_407) __extension__ ({ \ + uint32x2_t __ret_407; \ + uint32x4_t __s0_407 = __p0_407; \ + uint32x4_t __rev0_407; __rev0_407 = __builtin_shufflevector(__s0_407, __s0_407, 3, 2, 1, 0); \ + __ret_407 = __noswap_splat_laneq_u32(__rev0_407, __p1_407); \ + __ret_407 = __builtin_shufflevector(__ret_407, __ret_407, 1, 0); \ + __ret_407; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u64(__p0_408, __p1_408) __extension__ ({ \ + uint64x1_t __ret_408; \ + uint64x2_t __s0_408 = __p0_408; \ + __ret_408 = splat_laneq_u64(__s0_408, __p1_408); \ + __ret_408; \ +}) +#else +#define vdup_laneq_u64(__p0_409, __p1_409) __extension__ ({ \ + uint64x1_t __ret_409; \ + uint64x2_t __s0_409 = __p0_409; \ + uint64x2_t __rev0_409; __rev0_409 = __builtin_shufflevector(__s0_409, __s0_409, 1, 0); \ + __ret_409 = __noswap_splat_laneq_u64(__rev0_409, __p1_409); \ + __ret_409; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_u16(__p0_410, __p1_410) __extension__ ({ \ + uint16x4_t __ret_410; \ + uint16x8_t __s0_410 = __p0_410; \ + __ret_410 = splat_laneq_u16(__s0_410, __p1_410); \ + __ret_410; \ +}) +#else +#define vdup_laneq_u16(__p0_411, __p1_411) __extension__ ({ \ + uint16x4_t __ret_411; \ + uint16x8_t __s0_411 = __p0_411; \ + uint16x8_t __rev0_411; __rev0_411 = __builtin_shufflevector(__s0_411, __s0_411, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_411 = __noswap_splat_laneq_u16(__rev0_411, __p1_411); \ + __ret_411 = __builtin_shufflevector(__ret_411, __ret_411, 3, 2, 1, 0); \ + __ret_411; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s8(__p0_412, __p1_412) __extension__ ({ \ + int8x8_t __ret_412; \ + int8x16_t __s0_412 = __p0_412; \ + __ret_412 = splat_laneq_s8(__s0_412, __p1_412); \ + __ret_412; \ +}) +#else +#define vdup_laneq_s8(__p0_413, __p1_413) __extension__ ({ \ + int8x8_t __ret_413; \ + int8x16_t __s0_413 = __p0_413; \ + int8x16_t __rev0_413; __rev0_413 = __builtin_shufflevector(__s0_413, __s0_413, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_413 = __noswap_splat_laneq_s8(__rev0_413, __p1_413); \ + __ret_413 = __builtin_shufflevector(__ret_413, __ret_413, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_413; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f64(__p0_414, __p1_414) __extension__ ({ \ + float64x1_t __ret_414; \ + float64x2_t __s0_414 = __p0_414; \ + __ret_414 = splat_laneq_f64(__s0_414, __p1_414); \ + __ret_414; \ +}) +#else +#define vdup_laneq_f64(__p0_415, __p1_415) __extension__ ({ \ + float64x1_t __ret_415; \ + float64x2_t __s0_415 = __p0_415; \ + float64x2_t __rev0_415; __rev0_415 = __builtin_shufflevector(__s0_415, __s0_415, 1, 0); \ + __ret_415 = __noswap_splat_laneq_f64(__rev0_415, __p1_415); \ + __ret_415; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f32(__p0_416, __p1_416) __extension__ ({ \ + float32x2_t __ret_416; \ + float32x4_t __s0_416 = __p0_416; \ + __ret_416 = splat_laneq_f32(__s0_416, __p1_416); \ + __ret_416; \ +}) +#else +#define vdup_laneq_f32(__p0_417, __p1_417) __extension__ ({ \ + float32x2_t __ret_417; \ + float32x4_t __s0_417 = __p0_417; \ + float32x4_t __rev0_417; __rev0_417 = __builtin_shufflevector(__s0_417, __s0_417, 3, 2, 1, 0); \ + __ret_417 = __noswap_splat_laneq_f32(__rev0_417, __p1_417); \ + __ret_417 = __builtin_shufflevector(__ret_417, __ret_417, 1, 0); \ + 
__ret_417; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f16(__p0_418, __p1_418) __extension__ ({ \ + float16x4_t __ret_418; \ + float16x8_t __s0_418 = __p0_418; \ + __ret_418 = splat_laneq_f16(__s0_418, __p1_418); \ + __ret_418; \ +}) +#else +#define vdup_laneq_f16(__p0_419, __p1_419) __extension__ ({ \ + float16x4_t __ret_419; \ + float16x8_t __s0_419 = __p0_419; \ + float16x8_t __rev0_419; __rev0_419 = __builtin_shufflevector(__s0_419, __s0_419, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_419 = __noswap_splat_laneq_f16(__rev0_419, __p1_419); \ + __ret_419 = __builtin_shufflevector(__ret_419, __ret_419, 3, 2, 1, 0); \ + __ret_419; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s32(__p0_420, __p1_420) __extension__ ({ \ + int32x2_t __ret_420; \ + int32x4_t __s0_420 = __p0_420; \ + __ret_420 = splat_laneq_s32(__s0_420, __p1_420); \ + __ret_420; \ +}) +#else +#define vdup_laneq_s32(__p0_421, __p1_421) __extension__ ({ \ + int32x2_t __ret_421; \ + int32x4_t __s0_421 = __p0_421; \ + int32x4_t __rev0_421; __rev0_421 = __builtin_shufflevector(__s0_421, __s0_421, 3, 2, 1, 0); \ + __ret_421 = __noswap_splat_laneq_s32(__rev0_421, __p1_421); \ + __ret_421 = __builtin_shufflevector(__ret_421, __ret_421, 1, 0); \ + __ret_421; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s64(__p0_422, __p1_422) __extension__ ({ \ + int64x1_t __ret_422; \ + int64x2_t __s0_422 = __p0_422; \ + __ret_422 = splat_laneq_s64(__s0_422, __p1_422); \ + __ret_422; \ +}) +#else +#define vdup_laneq_s64(__p0_423, __p1_423) __extension__ ({ \ + int64x1_t __ret_423; \ + int64x2_t __s0_423 = __p0_423; \ + int64x2_t __rev0_423; __rev0_423 = __builtin_shufflevector(__s0_423, __s0_423, 1, 0); \ + __ret_423 = __noswap_splat_laneq_s64(__rev0_423, __p1_423); \ + __ret_423; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_s16(__p0_424, __p1_424) __extension__ ({ \ + int16x4_t __ret_424; \ + int16x8_t __s0_424 = __p0_424; \ + __ret_424 = splat_laneq_s16(__s0_424, __p1_424); \ + __ret_424; \ +}) +#else +#define vdup_laneq_s16(__p0_425, __p1_425) __extension__ ({ \ + int16x4_t __ret_425; \ + int16x8_t __s0_425 = __p0_425; \ + int16x8_t __rev0_425; __rev0_425 = __builtin_shufflevector(__s0_425, __s0_425, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_425 = __noswap_splat_laneq_s16(__rev0_425, __p1_425); \ + __ret_425 = __builtin_shufflevector(__ret_425, __ret_425, 3, 2, 1, 0); \ + __ret_425; \ +}) +#endif + +__ai __attribute__((target("neon"))) poly64x1_t vdup_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vdup_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} 
+#define vext_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vext_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64_t) 
__builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ + __ret; \ +}) +#else +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ + __ret; \ +}) +#else +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ + __ret; \ +}) +#endif + +#define vfma_lane_f64(__p0, __p1, 
__p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ + __ret; \ +}) +#else +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ + __ret; \ +}) +#else +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ 
\ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ + __ret; \ +}) +#else +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#else +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ + __ret; \ +}) +#else +#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2}); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, -__p1, __p2); + return __ret; +} +#define vfmsd_lane_f64(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \ + float64_t __ret_426; \ + float64_t __s0_426 = __p0_426; \ + float64_t __s1_426 = __p1_426; \ + float64x1_t __s2_426 = __p2_426; \ + __ret_426 = vfmad_lane_f64(__s0_426, -__s1_426, __s2_426, __p3_426); \ + __ret_426; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfmss_lane_f32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \ + float32_t __ret_427; \ + float32_t __s0_427 = __p0_427; \ + float32_t 
__s1_427 = __p1_427; \ + float32x2_t __s2_427 = __p2_427; \ + __ret_427 = vfmas_lane_f32(__s0_427, -__s1_427, __s2_427, __p3_427); \ + __ret_427; \ +}) +#else +#define vfmss_lane_f32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \ + float32_t __ret_428; \ + float32_t __s0_428 = __p0_428; \ + float32_t __s1_428 = __p1_428; \ + float32x2_t __s2_428 = __p2_428; \ + float32x2_t __rev2_428; __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 1, 0); \ + __ret_428 = __noswap_vfmas_lane_f32(__s0_428, -__s1_428, __rev2_428, __p3_428); \ + __ret_428; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f64(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \ + float64x2_t __ret_429; \ + float64x2_t __s0_429 = __p0_429; \ + float64x2_t __s1_429 = __p1_429; \ + float64x1_t __s2_429 = __p2_429; \ + __ret_429 = vfmaq_lane_f64(__s0_429, -__s1_429, __s2_429, __p3_429); \ + __ret_429; \ +}) +#else +#define vfmsq_lane_f64(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \ + float64x2_t __ret_430; \ + float64x2_t __s0_430 = __p0_430; \ + float64x2_t __s1_430 = __p1_430; \ + float64x1_t __s2_430 = __p2_430; \ + float64x2_t __rev0_430; __rev0_430 = __builtin_shufflevector(__s0_430, __s0_430, 1, 0); \ + float64x2_t __rev1_430; __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 1, 0); \ + __ret_430 = __noswap_vfmaq_lane_f64(__rev0_430, -__rev1_430, __s2_430, __p3_430); \ + __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 1, 0); \ + __ret_430; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_lane_f32(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \ + float32x4_t __ret_431; \ + float32x4_t __s0_431 = __p0_431; \ + float32x4_t __s1_431 = __p1_431; \ + float32x2_t __s2_431 = __p2_431; \ + __ret_431 = vfmaq_lane_f32(__s0_431, -__s1_431, __s2_431, __p3_431); \ + __ret_431; \ +}) +#else +#define vfmsq_lane_f32(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \ + float32x4_t __ret_432; \ + float32x4_t __s0_432 = __p0_432; \ + float32x4_t __s1_432 = __p1_432; \ + float32x2_t __s2_432 = __p2_432; \ + float32x4_t __rev0_432; __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \ + float32x4_t __rev1_432; __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 3, 2, 1, 0); \ + float32x2_t __rev2_432; __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 1, 0); \ + __ret_432 = __noswap_vfmaq_lane_f32(__rev0_432, -__rev1_432, __rev2_432, __p3_432); \ + __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \ + __ret_432; \ +}) +#endif + +#define vfms_lane_f64(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \ + float64x1_t __ret_433; \ + float64x1_t __s0_433 = __p0_433; \ + float64x1_t __s1_433 = __p1_433; \ + float64x1_t __s2_433 = __p2_433; \ + __ret_433 = vfma_lane_f64(__s0_433, -__s1_433, __s2_433, __p3_433); \ + __ret_433; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfms_lane_f32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \ + float32x2_t __ret_434; \ + float32x2_t __s0_434 = __p0_434; \ + float32x2_t __s1_434 = __p1_434; \ + float32x2_t __s2_434 = __p2_434; \ + __ret_434 = vfma_lane_f32(__s0_434, -__s1_434, __s2_434, __p3_434); \ + __ret_434; \ +}) +#else +#define vfms_lane_f32(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \ + float32x2_t __ret_435; \ + float32x2_t __s0_435 = __p0_435; \ + float32x2_t __s1_435 = __p1_435; \ + float32x2_t __s2_435 = __p2_435; \ + float32x2_t __rev0_435; __rev0_435 = __builtin_shufflevector(__s0_435, __s0_435, 1, 0); \ + float32x2_t __rev1_435; 
__rev1_435 = __builtin_shufflevector(__s1_435, __s1_435, 1, 0); \ + float32x2_t __rev2_435; __rev2_435 = __builtin_shufflevector(__s2_435, __s2_435, 1, 0); \ + __ret_435 = __noswap_vfma_lane_f32(__rev0_435, -__rev1_435, __rev2_435, __p3_435); \ + __ret_435 = __builtin_shufflevector(__ret_435, __ret_435, 1, 0); \ + __ret_435; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsd_laneq_f64(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \ + float64_t __ret_436; \ + float64_t __s0_436 = __p0_436; \ + float64_t __s1_436 = __p1_436; \ + float64x2_t __s2_436 = __p2_436; \ + __ret_436 = vfmad_laneq_f64(__s0_436, -__s1_436, __s2_436, __p3_436); \ + __ret_436; \ +}) +#else +#define vfmsd_laneq_f64(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \ + float64_t __ret_437; \ + float64_t __s0_437 = __p0_437; \ + float64_t __s1_437 = __p1_437; \ + float64x2_t __s2_437 = __p2_437; \ + float64x2_t __rev2_437; __rev2_437 = __builtin_shufflevector(__s2_437, __s2_437, 1, 0); \ + __ret_437 = __noswap_vfmad_laneq_f64(__s0_437, -__s1_437, __rev2_437, __p3_437); \ + __ret_437; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmss_laneq_f32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \ + float32_t __ret_438; \ + float32_t __s0_438 = __p0_438; \ + float32_t __s1_438 = __p1_438; \ + float32x4_t __s2_438 = __p2_438; \ + __ret_438 = vfmas_laneq_f32(__s0_438, -__s1_438, __s2_438, __p3_438); \ + __ret_438; \ +}) +#else +#define vfmss_laneq_f32(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \ + float32_t __ret_439; \ + float32_t __s0_439 = __p0_439; \ + float32_t __s1_439 = __p1_439; \ + float32x4_t __s2_439 = __p2_439; \ + float32x4_t __rev2_439; __rev2_439 = __builtin_shufflevector(__s2_439, __s2_439, 3, 2, 1, 0); \ + __ret_439 = __noswap_vfmas_laneq_f32(__s0_439, -__s1_439, __rev2_439, __p3_439); \ + __ret_439; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f64(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \ + float64x2_t __ret_440; \ + float64x2_t __s0_440 = __p0_440; \ + float64x2_t __s1_440 = __p1_440; \ + float64x2_t __s2_440 = __p2_440; \ + __ret_440 = vfmaq_laneq_f64(__s0_440, -__s1_440, __s2_440, __p3_440); \ + __ret_440; \ +}) +#else +#define vfmsq_laneq_f64(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \ + float64x2_t __ret_441; \ + float64x2_t __s0_441 = __p0_441; \ + float64x2_t __s1_441 = __p1_441; \ + float64x2_t __s2_441 = __p2_441; \ + float64x2_t __rev0_441; __rev0_441 = __builtin_shufflevector(__s0_441, __s0_441, 1, 0); \ + float64x2_t __rev1_441; __rev1_441 = __builtin_shufflevector(__s1_441, __s1_441, 1, 0); \ + float64x2_t __rev2_441; __rev2_441 = __builtin_shufflevector(__s2_441, __s2_441, 1, 0); \ + __ret_441 = __noswap_vfmaq_laneq_f64(__rev0_441, -__rev1_441, __rev2_441, __p3_441); \ + __ret_441 = __builtin_shufflevector(__ret_441, __ret_441, 1, 0); \ + __ret_441; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmsq_laneq_f32(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \ + float32x4_t __ret_442; \ + float32x4_t __s0_442 = __p0_442; \ + float32x4_t __s1_442 = __p1_442; \ + float32x4_t __s2_442 = __p2_442; \ + __ret_442 = vfmaq_laneq_f32(__s0_442, -__s1_442, __s2_442, __p3_442); \ + __ret_442; \ +}) +#else +#define vfmsq_laneq_f32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \ + float32x4_t __ret_443; \ + float32x4_t __s0_443 = __p0_443; \ + float32x4_t __s1_443 = __p1_443; \ + float32x4_t __s2_443 = __p2_443; \ + float32x4_t __rev0_443; __rev0_443 = __builtin_shufflevector(__s0_443, 
__s0_443, 3, 2, 1, 0); \ + float32x4_t __rev1_443; __rev1_443 = __builtin_shufflevector(__s1_443, __s1_443, 3, 2, 1, 0); \ + float32x4_t __rev2_443; __rev2_443 = __builtin_shufflevector(__s2_443, __s2_443, 3, 2, 1, 0); \ + __ret_443 = __noswap_vfmaq_laneq_f32(__rev0_443, -__rev1_443, __rev2_443, __p3_443); \ + __ret_443 = __builtin_shufflevector(__ret_443, __ret_443, 3, 2, 1, 0); \ + __ret_443; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f64(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \ + float64x1_t __ret_444; \ + float64x1_t __s0_444 = __p0_444; \ + float64x1_t __s1_444 = __p1_444; \ + float64x2_t __s2_444 = __p2_444; \ + __ret_444 = vfma_laneq_f64(__s0_444, -__s1_444, __s2_444, __p3_444); \ + __ret_444; \ +}) +#else +#define vfms_laneq_f64(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \ + float64x1_t __ret_445; \ + float64x1_t __s0_445 = __p0_445; \ + float64x1_t __s1_445 = __p1_445; \ + float64x2_t __s2_445 = __p2_445; \ + float64x2_t __rev2_445; __rev2_445 = __builtin_shufflevector(__s2_445, __s2_445, 1, 0); \ + __ret_445 = __noswap_vfma_laneq_f64(__s0_445, -__s1_445, __rev2_445, __p3_445); \ + __ret_445; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfms_laneq_f32(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \ + float32x2_t __ret_446; \ + float32x2_t __s0_446 = __p0_446; \ + float32x2_t __s1_446 = __p1_446; \ + float32x4_t __s2_446 = __p2_446; \ + __ret_446 = vfma_laneq_f32(__s0_446, -__s1_446, __s2_446, __p3_446); \ + __ret_446; \ +}) +#else +#define vfms_laneq_f32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \ + float32x2_t __ret_447; \ + float32x2_t __s0_447 = __p0_447; \ + float32x2_t __s1_447 = __p1_447; \ + float32x4_t __s2_447 = __p2_447; \ + float32x2_t __rev0_447; __rev0_447 = __builtin_shufflevector(__s0_447, __s0_447, 1, 0); \ + float32x2_t __rev1_447; __rev1_447 = __builtin_shufflevector(__s1_447, __s1_447, 1, 0); \ + float32x4_t __rev2_447; __rev2_447 = __builtin_shufflevector(__s2_447, __s2_447, 3, 2, 1, 0); \ + __ret_447 = __noswap_vfma_laneq_f32(__rev0_447, -__rev1_447, __rev2_447, __p3_447); \ + __ret_447 = __builtin_shufflevector(__ret_447, __ret_447, 1, 0); \ + __ret_447; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, 
__p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2}); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x1_t vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x1_t vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x1_t vget_high_f64(float64x2_t __p0) { + float64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x1_t vget_high_f64(float64x2_t __p0) { + float64x1_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} +#endif + +#define vget_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) +#else +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define 
__noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) +#endif + +#define vget_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x1_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x1_t vget_low_f64(float64x2_t __p0) { + float64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x1_t vget_low_f64(float64x2_t __p0) { + float64x1_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} +#endif + +#define vld1_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ + __ret; \ +}) +#define vld1_dup_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret; \ +}) +#else +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret; \ +}) +#else +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_dup_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ + __ret; \ +}) +#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + 
poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#define vld1_p64_x2(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64_x2(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld1_p64_x3(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + 
__builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64_x3(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld1_p64_x4(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld1_f64_x4(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld2_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld2_dup_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_dup_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __ret; \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __ret; \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ + __ret; \ +}) +#else +#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __ret; \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __ret; \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ + __ret; \ +}) +#else +#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __ret; \ + poly64x2x2_t __s1 = 
__p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __ret; \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ + __ret; \ +}) +#else +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __ret; \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __ret; \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ + __ret; \ +}) +#else +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __ret; \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __ret; \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ + __ret; \ +}) +#else +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __ret; \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __ret; \ + float64x2x2_t 
__s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ + __ret; \ +}) +#else +#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __ret; \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __ret; \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ + __ret; \ +}) +#else +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __ret; \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) +#endif + +#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __ret; \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ + __ret; \ +}) +#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __ret; \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ + __ret; \ +}) +#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __ret; \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ + __ret; \ +}) +#define vld3_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_f64(__p0) __extension__ ({ \ + 
float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld3q_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld3_dup_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_dup_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __ret; \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __ret; \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ + __ret; \ +}) +#else +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __ret; \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __ret; \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ + __ret; \ +}) +#else +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __ret; \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __ret; \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ + __ret; \ +}) +#else +#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __ret; \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __ret; \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ + __ret; \ +}) +#else +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __ret; \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); 
\ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __ret; \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ + __ret; \ +}) +#else +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __ret; \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __ret; \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ + __ret; \ +}) +#else +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __ret; \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __ret; \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ + __ret; \ +}) +#else +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __ret; \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + 
__builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) +#endif + +#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __ret; \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ + __ret; \ +}) +#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __ret; \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ + __ret; \ +}) +#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __ret; \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ + __ret; \ +}) +#define vld4_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld4_dup_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + __ret; \ +}) +#else +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_dup_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + __ret; \ +}) +#else +#define vld4q_dup_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_dup_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __ret; \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __ret; \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ + __ret; \ +}) +#else +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __ret; \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __ret; \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ + __ret; \ +}) +#else +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __ret; \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __ret; \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ + __ret; \ +}) +#else +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __ret; \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __ret; \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ + __ret; \ +}) +#else +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + 
uint64x2x4_t __ret; \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __ret; \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ + __ret; \ +}) +#else +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __ret; \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __ret; \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ + __ret; \ +}) +#else +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __ret; \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + 
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __ret; \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ + __ret; \ +}) +#else +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __ret; \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __ret; \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ + __ret; \ +}) +#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __ret; \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ + __ret; \ +}) +#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __ret; \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ + __ret; \ +}) +#define vldrq_p128(__p0) __extension__ ({ \ + poly128_t __ret; \ + __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) 
__builtin_neon_vmaxnmvq_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vmaxnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vmaxnmv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8_t vmaxvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8_t vmaxvq_u8(uint8x16_t __p0) { + uint8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8_t vmaxvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8_t vmaxvq_s8(int8x16_t __p0) { + int8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vmaxvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vmaxvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
__ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vmaxvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vmaxvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32_t vmaxvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32_t vmaxvq_s32(int32x4_t __p0) { + int32_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vmaxvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16_t vmaxvq_s16(int16x8_t __p0) { + int16_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16_t vmaxv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16_t vmaxv_u16(uint16x4_t __p0) { + uint16_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8_t vmaxv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8_t vmaxv_s8(int8x8_t __p0) { + int8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vmaxv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vmaxv_f32(float32x2_t __p0) { 
+ float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32_t vmaxv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32_t vmaxv_s32(int32x2_t __p0) { + int32_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vmaxv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16_t vmaxv_s16(int16x4_t __p0) { + int16_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vminnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vminnmvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vminnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vminnmvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vminnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vminnmv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8_t vminvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) 
__builtin_neon_vminvq_u8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8_t vminvq_u8(uint8x16_t __p0) { + uint8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32_t vminvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32_t vminvq_u32(uint32x4_t __p0) { + uint32_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16_t vminvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16_t vminvq_u16(uint16x8_t __p0) { + uint16_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8_t vminvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminvq_s8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8_t vminvq_s8(int8x16_t __p0) { + int8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vminvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminvq_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vminvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vminvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminvq_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vminvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32_t vminvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminvq_s32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32_t vminvq_s32(int32x4_t __p0) { + int32_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vminvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminvq_s16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16_t vminvq_s16(int16x8_t __p0) { + int16_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) 
__builtin_neon_vminvq_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8_t vminv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminv_u8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8_t vminv_u8(uint8x8_t __p0) { + uint8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32_t vminv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminv_u32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32_t vminv_u32(uint32x2_t __p0) { + uint32_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16_t vminv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminv_u16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16_t vminv_u16(uint16x4_t __p0) { + uint16_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8_t vminv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminv_s8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8_t vminv_s8(int8x8_t __p0) { + int8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vminv_s8(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vminv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminv_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vminv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vminv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32_t vminv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminv_s32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32_t vminv_s32(int32x2_t __p0) { + int32_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32_t) __builtin_neon_vminv_s32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vminv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminv_s16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16_t vminv_s16(int16x4_t __p0) { + int16_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vminv_s16(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + 
float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_u32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \ + uint32x4_t __ret_448; \ + uint32x4_t __s0_448 = __p0_448; \ + uint32x4_t __s1_448 = __p1_448; \ + uint32x4_t __s2_448 = __p2_448; \ + __ret_448 = __s0_448 + __s1_448 * splatq_laneq_u32(__s2_448, __p3_448); \ + __ret_448; \ +}) +#else +#define vmlaq_laneq_u32(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \ + uint32x4_t __ret_449; \ + uint32x4_t __s0_449 = __p0_449; \ + uint32x4_t __s1_449 = __p1_449; \ + uint32x4_t __s2_449 = __p2_449; \ + uint32x4_t __rev0_449; __rev0_449 = __builtin_shufflevector(__s0_449, __s0_449, 3, 2, 1, 0); \ + uint32x4_t __rev1_449; __rev1_449 = __builtin_shufflevector(__s1_449, __s1_449, 3, 2, 1, 0); \ + uint32x4_t __rev2_449; __rev2_449 = __builtin_shufflevector(__s2_449, __s2_449, 3, 2, 1, 0); \ + __ret_449 = __rev0_449 + __rev1_449 * __noswap_splatq_laneq_u32(__rev2_449, __p3_449); \ + __ret_449 = __builtin_shufflevector(__ret_449, __ret_449, 3, 2, 1, 0); \ + __ret_449; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_u16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \ + uint16x8_t __ret_450; \ + uint16x8_t __s0_450 = __p0_450; \ + uint16x8_t __s1_450 = __p1_450; \ + uint16x8_t __s2_450 = __p2_450; \ + __ret_450 = __s0_450 + __s1_450 * splatq_laneq_u16(__s2_450, __p3_450); \ + __ret_450; \ +}) +#else +#define vmlaq_laneq_u16(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \ + uint16x8_t __ret_451; \ + uint16x8_t __s0_451 = __p0_451; \ + uint16x8_t __s1_451 = __p1_451; \ + uint16x8_t __s2_451 = __p2_451; \ + uint16x8_t __rev0_451; __rev0_451 = __builtin_shufflevector(__s0_451, __s0_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_451; __rev1_451 = __builtin_shufflevector(__s1_451, __s1_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_451; __rev2_451 = __builtin_shufflevector(__s2_451, __s2_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_451 = __rev0_451 + __rev1_451 * __noswap_splatq_laneq_u16(__rev2_451, __p3_451); \ + __ret_451 = __builtin_shufflevector(__ret_451, __ret_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_451; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_f32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \ + float32x4_t __ret_452; \ + float32x4_t __s0_452 = __p0_452; \ + float32x4_t __s1_452 = __p1_452; \ + float32x4_t __s2_452 = __p2_452; \ + __ret_452 = __s0_452 + __s1_452 * splatq_laneq_f32(__s2_452, __p3_452); \ + __ret_452; \ +}) +#else +#define vmlaq_laneq_f32(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \ + float32x4_t __ret_453; \ + float32x4_t __s0_453 = __p0_453; \ + float32x4_t __s1_453 = __p1_453; \ + float32x4_t __s2_453 = __p2_453; \ + float32x4_t __rev0_453; __rev0_453 = __builtin_shufflevector(__s0_453, __s0_453, 3, 2, 1, 0); \ + float32x4_t __rev1_453; __rev1_453 = __builtin_shufflevector(__s1_453, __s1_453, 3, 2, 1, 0); \ + float32x4_t __rev2_453; __rev2_453 = __builtin_shufflevector(__s2_453, __s2_453, 3, 2, 1, 
0); \ + __ret_453 = __rev0_453 + __rev1_453 * __noswap_splatq_laneq_f32(__rev2_453, __p3_453); \ + __ret_453 = __builtin_shufflevector(__ret_453, __ret_453, 3, 2, 1, 0); \ + __ret_453; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_s32(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \ + int32x4_t __ret_454; \ + int32x4_t __s0_454 = __p0_454; \ + int32x4_t __s1_454 = __p1_454; \ + int32x4_t __s2_454 = __p2_454; \ + __ret_454 = __s0_454 + __s1_454 * splatq_laneq_s32(__s2_454, __p3_454); \ + __ret_454; \ +}) +#else +#define vmlaq_laneq_s32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \ + int32x4_t __ret_455; \ + int32x4_t __s0_455 = __p0_455; \ + int32x4_t __s1_455 = __p1_455; \ + int32x4_t __s2_455 = __p2_455; \ + int32x4_t __rev0_455; __rev0_455 = __builtin_shufflevector(__s0_455, __s0_455, 3, 2, 1, 0); \ + int32x4_t __rev1_455; __rev1_455 = __builtin_shufflevector(__s1_455, __s1_455, 3, 2, 1, 0); \ + int32x4_t __rev2_455; __rev2_455 = __builtin_shufflevector(__s2_455, __s2_455, 3, 2, 1, 0); \ + __ret_455 = __rev0_455 + __rev1_455 * __noswap_splatq_laneq_s32(__rev2_455, __p3_455); \ + __ret_455 = __builtin_shufflevector(__ret_455, __ret_455, 3, 2, 1, 0); \ + __ret_455; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_laneq_s16(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \ + int16x8_t __ret_456; \ + int16x8_t __s0_456 = __p0_456; \ + int16x8_t __s1_456 = __p1_456; \ + int16x8_t __s2_456 = __p2_456; \ + __ret_456 = __s0_456 + __s1_456 * splatq_laneq_s16(__s2_456, __p3_456); \ + __ret_456; \ +}) +#else +#define vmlaq_laneq_s16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \ + int16x8_t __ret_457; \ + int16x8_t __s0_457 = __p0_457; \ + int16x8_t __s1_457 = __p1_457; \ + int16x8_t __s2_457 = __p2_457; \ + int16x8_t __rev0_457; __rev0_457 = __builtin_shufflevector(__s0_457, __s0_457, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_457; __rev1_457 = __builtin_shufflevector(__s1_457, __s1_457, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_457; __rev2_457 = __builtin_shufflevector(__s2_457, __s2_457, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_457 = __rev0_457 + __rev1_457 * __noswap_splatq_laneq_s16(__rev2_457, __p3_457); \ + __ret_457 = __builtin_shufflevector(__ret_457, __ret_457, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_457; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_u32(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \ + uint32x2_t __ret_458; \ + uint32x2_t __s0_458 = __p0_458; \ + uint32x2_t __s1_458 = __p1_458; \ + uint32x4_t __s2_458 = __p2_458; \ + __ret_458 = __s0_458 + __s1_458 * splat_laneq_u32(__s2_458, __p3_458); \ + __ret_458; \ +}) +#else +#define vmla_laneq_u32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \ + uint32x2_t __ret_459; \ + uint32x2_t __s0_459 = __p0_459; \ + uint32x2_t __s1_459 = __p1_459; \ + uint32x4_t __s2_459 = __p2_459; \ + uint32x2_t __rev0_459; __rev0_459 = __builtin_shufflevector(__s0_459, __s0_459, 1, 0); \ + uint32x2_t __rev1_459; __rev1_459 = __builtin_shufflevector(__s1_459, __s1_459, 1, 0); \ + uint32x4_t __rev2_459; __rev2_459 = __builtin_shufflevector(__s2_459, __s2_459, 3, 2, 1, 0); \ + __ret_459 = __rev0_459 + __rev1_459 * __noswap_splat_laneq_u32(__rev2_459, __p3_459); \ + __ret_459 = __builtin_shufflevector(__ret_459, __ret_459, 1, 0); \ + __ret_459; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_u16(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \ + uint16x4_t __ret_460; \ + uint16x4_t __s0_460 = __p0_460; \ + uint16x4_t __s1_460 = 
__p1_460; \ + uint16x8_t __s2_460 = __p2_460; \ + __ret_460 = __s0_460 + __s1_460 * splat_laneq_u16(__s2_460, __p3_460); \ + __ret_460; \ +}) +#else +#define vmla_laneq_u16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \ + uint16x4_t __ret_461; \ + uint16x4_t __s0_461 = __p0_461; \ + uint16x4_t __s1_461 = __p1_461; \ + uint16x8_t __s2_461 = __p2_461; \ + uint16x4_t __rev0_461; __rev0_461 = __builtin_shufflevector(__s0_461, __s0_461, 3, 2, 1, 0); \ + uint16x4_t __rev1_461; __rev1_461 = __builtin_shufflevector(__s1_461, __s1_461, 3, 2, 1, 0); \ + uint16x8_t __rev2_461; __rev2_461 = __builtin_shufflevector(__s2_461, __s2_461, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_461 = __rev0_461 + __rev1_461 * __noswap_splat_laneq_u16(__rev2_461, __p3_461); \ + __ret_461 = __builtin_shufflevector(__ret_461, __ret_461, 3, 2, 1, 0); \ + __ret_461; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_f32(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \ + float32x2_t __ret_462; \ + float32x2_t __s0_462 = __p0_462; \ + float32x2_t __s1_462 = __p1_462; \ + float32x4_t __s2_462 = __p2_462; \ + __ret_462 = __s0_462 + __s1_462 * splat_laneq_f32(__s2_462, __p3_462); \ + __ret_462; \ +}) +#else +#define vmla_laneq_f32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \ + float32x2_t __ret_463; \ + float32x2_t __s0_463 = __p0_463; \ + float32x2_t __s1_463 = __p1_463; \ + float32x4_t __s2_463 = __p2_463; \ + float32x2_t __rev0_463; __rev0_463 = __builtin_shufflevector(__s0_463, __s0_463, 1, 0); \ + float32x2_t __rev1_463; __rev1_463 = __builtin_shufflevector(__s1_463, __s1_463, 1, 0); \ + float32x4_t __rev2_463; __rev2_463 = __builtin_shufflevector(__s2_463, __s2_463, 3, 2, 1, 0); \ + __ret_463 = __rev0_463 + __rev1_463 * __noswap_splat_laneq_f32(__rev2_463, __p3_463); \ + __ret_463 = __builtin_shufflevector(__ret_463, __ret_463, 1, 0); \ + __ret_463; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_s32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \ + int32x2_t __ret_464; \ + int32x2_t __s0_464 = __p0_464; \ + int32x2_t __s1_464 = __p1_464; \ + int32x4_t __s2_464 = __p2_464; \ + __ret_464 = __s0_464 + __s1_464 * splat_laneq_s32(__s2_464, __p3_464); \ + __ret_464; \ +}) +#else +#define vmla_laneq_s32(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \ + int32x2_t __ret_465; \ + int32x2_t __s0_465 = __p0_465; \ + int32x2_t __s1_465 = __p1_465; \ + int32x4_t __s2_465 = __p2_465; \ + int32x2_t __rev0_465; __rev0_465 = __builtin_shufflevector(__s0_465, __s0_465, 1, 0); \ + int32x2_t __rev1_465; __rev1_465 = __builtin_shufflevector(__s1_465, __s1_465, 1, 0); \ + int32x4_t __rev2_465; __rev2_465 = __builtin_shufflevector(__s2_465, __s2_465, 3, 2, 1, 0); \ + __ret_465 = __rev0_465 + __rev1_465 * __noswap_splat_laneq_s32(__rev2_465, __p3_465); \ + __ret_465 = __builtin_shufflevector(__ret_465, __ret_465, 1, 0); \ + __ret_465; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_laneq_s16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \ + int16x4_t __ret_466; \ + int16x4_t __s0_466 = __p0_466; \ + int16x4_t __s1_466 = __p1_466; \ + int16x8_t __s2_466 = __p2_466; \ + __ret_466 = __s0_466 + __s1_466 * splat_laneq_s16(__s2_466, __p3_466); \ + __ret_466; \ +}) +#else +#define vmla_laneq_s16(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \ + int16x4_t __ret_467; \ + int16x4_t __s0_467 = __p0_467; \ + int16x4_t __s1_467 = __p1_467; \ + int16x8_t __s2_467 = __p2_467; \ + int16x4_t __rev0_467; __rev0_467 = __builtin_shufflevector(__s0_467, __s0_467, 
3, 2, 1, 0); \ + int16x4_t __rev1_467; __rev1_467 = __builtin_shufflevector(__s1_467, __s1_467, 3, 2, 1, 0); \ + int16x8_t __rev2_467; __rev2_467 = __builtin_shufflevector(__s2_467, __s2_467, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_467 = __rev0_467 + __rev1_467 * __noswap_splat_laneq_s16(__rev2_467, __p3_467); \ + __ret_467 = __builtin_shufflevector(__ret_467, __ret_467, 3, 2, 1, 0); \ + __ret_467; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_u32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \ + uint64x2_t __ret_468; \ + uint64x2_t __s0_468 = __p0_468; \ + uint32x4_t __s1_468 = __p1_468; \ + uint32x2_t __s2_468 = __p2_468; \ + __ret_468 = __s0_468 + vmull_u32(vget_high_u32(__s1_468), splat_lane_u32(__s2_468, __p3_468)); \ + __ret_468; \ +}) +#else +#define vmlal_high_lane_u32(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \ + uint64x2_t __ret_469; \ + uint64x2_t __s0_469 = __p0_469; \ + uint32x4_t __s1_469 = __p1_469; \ + uint32x2_t __s2_469 = __p2_469; \ + uint64x2_t __rev0_469; __rev0_469 = __builtin_shufflevector(__s0_469, __s0_469, 1, 0); \ + uint32x4_t __rev1_469; __rev1_469 = __builtin_shufflevector(__s1_469, __s1_469, 3, 2, 1, 0); \ + uint32x2_t __rev2_469; __rev2_469 = __builtin_shufflevector(__s2_469, __s2_469, 1, 0); \ + __ret_469 = __rev0_469 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_469), __noswap_splat_lane_u32(__rev2_469, __p3_469)); \ + __ret_469 = __builtin_shufflevector(__ret_469, __ret_469, 1, 0); \ + __ret_469; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_u16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \ + uint32x4_t __ret_470; \ + uint32x4_t __s0_470 = __p0_470; \ + uint16x8_t __s1_470 = __p1_470; \ + uint16x4_t __s2_470 = __p2_470; \ + __ret_470 = __s0_470 + vmull_u16(vget_high_u16(__s1_470), splat_lane_u16(__s2_470, __p3_470)); \ + __ret_470; \ +}) +#else +#define vmlal_high_lane_u16(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \ + uint32x4_t __ret_471; \ + uint32x4_t __s0_471 = __p0_471; \ + uint16x8_t __s1_471 = __p1_471; \ + uint16x4_t __s2_471 = __p2_471; \ + uint32x4_t __rev0_471; __rev0_471 = __builtin_shufflevector(__s0_471, __s0_471, 3, 2, 1, 0); \ + uint16x8_t __rev1_471; __rev1_471 = __builtin_shufflevector(__s1_471, __s1_471, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_471; __rev2_471 = __builtin_shufflevector(__s2_471, __s2_471, 3, 2, 1, 0); \ + __ret_471 = __rev0_471 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_471), __noswap_splat_lane_u16(__rev2_471, __p3_471)); \ + __ret_471 = __builtin_shufflevector(__ret_471, __ret_471, 3, 2, 1, 0); \ + __ret_471; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_s32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \ + int64x2_t __ret_472; \ + int64x2_t __s0_472 = __p0_472; \ + int32x4_t __s1_472 = __p1_472; \ + int32x2_t __s2_472 = __p2_472; \ + __ret_472 = __s0_472 + vmull_s32(vget_high_s32(__s1_472), splat_lane_s32(__s2_472, __p3_472)); \ + __ret_472; \ +}) +#else +#define vmlal_high_lane_s32(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \ + int64x2_t __ret_473; \ + int64x2_t __s0_473 = __p0_473; \ + int32x4_t __s1_473 = __p1_473; \ + int32x2_t __s2_473 = __p2_473; \ + int64x2_t __rev0_473; __rev0_473 = __builtin_shufflevector(__s0_473, __s0_473, 1, 0); \ + int32x4_t __rev1_473; __rev1_473 = __builtin_shufflevector(__s1_473, __s1_473, 3, 2, 1, 0); \ + int32x2_t __rev2_473; __rev2_473 = __builtin_shufflevector(__s2_473, __s2_473, 1, 0); \ + __ret_473 = __rev0_473 + 
__noswap_vmull_s32(__noswap_vget_high_s32(__rev1_473), __noswap_splat_lane_s32(__rev2_473, __p3_473)); \ + __ret_473 = __builtin_shufflevector(__ret_473, __ret_473, 1, 0); \ + __ret_473; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_lane_s16(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \ + int32x4_t __ret_474; \ + int32x4_t __s0_474 = __p0_474; \ + int16x8_t __s1_474 = __p1_474; \ + int16x4_t __s2_474 = __p2_474; \ + __ret_474 = __s0_474 + vmull_s16(vget_high_s16(__s1_474), splat_lane_s16(__s2_474, __p3_474)); \ + __ret_474; \ +}) +#else +#define vmlal_high_lane_s16(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \ + int32x4_t __ret_475; \ + int32x4_t __s0_475 = __p0_475; \ + int16x8_t __s1_475 = __p1_475; \ + int16x4_t __s2_475 = __p2_475; \ + int32x4_t __rev0_475; __rev0_475 = __builtin_shufflevector(__s0_475, __s0_475, 3, 2, 1, 0); \ + int16x8_t __rev1_475; __rev1_475 = __builtin_shufflevector(__s1_475, __s1_475, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_475; __rev2_475 = __builtin_shufflevector(__s2_475, __s2_475, 3, 2, 1, 0); \ + __ret_475 = __rev0_475 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_475), __noswap_splat_lane_s16(__rev2_475, __p3_475)); \ + __ret_475 = __builtin_shufflevector(__ret_475, __ret_475, 3, 2, 1, 0); \ + __ret_475; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_u32(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \ + uint64x2_t __ret_476; \ + uint64x2_t __s0_476 = __p0_476; \ + uint32x4_t __s1_476 = __p1_476; \ + uint32x4_t __s2_476 = __p2_476; \ + __ret_476 = __s0_476 + vmull_u32(vget_high_u32(__s1_476), splat_laneq_u32(__s2_476, __p3_476)); \ + __ret_476; \ +}) +#else +#define vmlal_high_laneq_u32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \ + uint64x2_t __ret_477; \ + uint64x2_t __s0_477 = __p0_477; \ + uint32x4_t __s1_477 = __p1_477; \ + uint32x4_t __s2_477 = __p2_477; \ + uint64x2_t __rev0_477; __rev0_477 = __builtin_shufflevector(__s0_477, __s0_477, 1, 0); \ + uint32x4_t __rev1_477; __rev1_477 = __builtin_shufflevector(__s1_477, __s1_477, 3, 2, 1, 0); \ + uint32x4_t __rev2_477; __rev2_477 = __builtin_shufflevector(__s2_477, __s2_477, 3, 2, 1, 0); \ + __ret_477 = __rev0_477 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_477), __noswap_splat_laneq_u32(__rev2_477, __p3_477)); \ + __ret_477 = __builtin_shufflevector(__ret_477, __ret_477, 1, 0); \ + __ret_477; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_u16(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \ + uint32x4_t __ret_478; \ + uint32x4_t __s0_478 = __p0_478; \ + uint16x8_t __s1_478 = __p1_478; \ + uint16x8_t __s2_478 = __p2_478; \ + __ret_478 = __s0_478 + vmull_u16(vget_high_u16(__s1_478), splat_laneq_u16(__s2_478, __p3_478)); \ + __ret_478; \ +}) +#else +#define vmlal_high_laneq_u16(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \ + uint32x4_t __ret_479; \ + uint32x4_t __s0_479 = __p0_479; \ + uint16x8_t __s1_479 = __p1_479; \ + uint16x8_t __s2_479 = __p2_479; \ + uint32x4_t __rev0_479; __rev0_479 = __builtin_shufflevector(__s0_479, __s0_479, 3, 2, 1, 0); \ + uint16x8_t __rev1_479; __rev1_479 = __builtin_shufflevector(__s1_479, __s1_479, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_479; __rev2_479 = __builtin_shufflevector(__s2_479, __s2_479, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_479 = __rev0_479 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_479), __noswap_splat_laneq_u16(__rev2_479, __p3_479)); \ + __ret_479 = __builtin_shufflevector(__ret_479, __ret_479, 3, 2, 1, 
0); \ + __ret_479; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_s32(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \ + int64x2_t __ret_480; \ + int64x2_t __s0_480 = __p0_480; \ + int32x4_t __s1_480 = __p1_480; \ + int32x4_t __s2_480 = __p2_480; \ + __ret_480 = __s0_480 + vmull_s32(vget_high_s32(__s1_480), splat_laneq_s32(__s2_480, __p3_480)); \ + __ret_480; \ +}) +#else +#define vmlal_high_laneq_s32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \ + int64x2_t __ret_481; \ + int64x2_t __s0_481 = __p0_481; \ + int32x4_t __s1_481 = __p1_481; \ + int32x4_t __s2_481 = __p2_481; \ + int64x2_t __rev0_481; __rev0_481 = __builtin_shufflevector(__s0_481, __s0_481, 1, 0); \ + int32x4_t __rev1_481; __rev1_481 = __builtin_shufflevector(__s1_481, __s1_481, 3, 2, 1, 0); \ + int32x4_t __rev2_481; __rev2_481 = __builtin_shufflevector(__s2_481, __s2_481, 3, 2, 1, 0); \ + __ret_481 = __rev0_481 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_481), __noswap_splat_laneq_s32(__rev2_481, __p3_481)); \ + __ret_481 = __builtin_shufflevector(__ret_481, __ret_481, 1, 0); \ + __ret_481; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_high_laneq_s16(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \ + int32x4_t __ret_482; \ + int32x4_t __s0_482 = __p0_482; \ + int16x8_t __s1_482 = __p1_482; \ + int16x8_t __s2_482 = __p2_482; \ + __ret_482 = __s0_482 + vmull_s16(vget_high_s16(__s1_482), splat_laneq_s16(__s2_482, __p3_482)); \ + __ret_482; \ +}) +#else +#define vmlal_high_laneq_s16(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \ + int32x4_t __ret_483; \ + int32x4_t __s0_483 = __p0_483; \ + int16x8_t __s1_483 = __p1_483; \ + int16x8_t __s2_483 = __p2_483; \ + int32x4_t __rev0_483; __rev0_483 = __builtin_shufflevector(__s0_483, __s0_483, 3, 2, 1, 0); \ + int16x8_t __rev1_483; __rev1_483 = __builtin_shufflevector(__s1_483, __s1_483, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_483; __rev2_483 = __builtin_shufflevector(__s2_483, __s2_483, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_483 = __rev0_483 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_483), __noswap_splat_laneq_s16(__rev2_483, __p3_483)); \ + __ret_483 = __builtin_shufflevector(__ret_483, __ret_483, 3, 2, 1, 0); \ + __ret_483; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_u32(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \ + uint64x2_t __ret_484; \ + uint64x2_t __s0_484 = __p0_484; \ + uint32x2_t __s1_484 = __p1_484; \ + uint32x4_t __s2_484 = __p2_484; \ + __ret_484 = __s0_484 + vmull_u32(__s1_484, splat_laneq_u32(__s2_484, __p3_484)); \ + __ret_484; \ +}) +#else +#define vmlal_laneq_u32(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \ + uint64x2_t __ret_485; \ + uint64x2_t __s0_485 = __p0_485; \ + uint32x2_t __s1_485 = __p1_485; \ + uint32x4_t __s2_485 = __p2_485; \ + uint64x2_t __rev0_485; __rev0_485 = __builtin_shufflevector(__s0_485, __s0_485, 1, 0); \ + uint32x2_t __rev1_485; __rev1_485 = __builtin_shufflevector(__s1_485, __s1_485, 1, 0); \ + uint32x4_t __rev2_485; __rev2_485 = __builtin_shufflevector(__s2_485, __s2_485, 3, 2, 1, 0); \ + __ret_485 = __rev0_485 + __noswap_vmull_u32(__rev1_485, __noswap_splat_laneq_u32(__rev2_485, __p3_485)); \ + __ret_485 = __builtin_shufflevector(__ret_485, __ret_485, 1, 0); \ + __ret_485; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_u16(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \ + uint32x4_t __ret_486; \ + uint32x4_t __s0_486 = __p0_486; \ + uint16x4_t __s1_486 = __p1_486; \ + 
uint16x8_t __s2_486 = __p2_486; \ + __ret_486 = __s0_486 + vmull_u16(__s1_486, splat_laneq_u16(__s2_486, __p3_486)); \ + __ret_486; \ +}) +#else +#define vmlal_laneq_u16(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \ + uint32x4_t __ret_487; \ + uint32x4_t __s0_487 = __p0_487; \ + uint16x4_t __s1_487 = __p1_487; \ + uint16x8_t __s2_487 = __p2_487; \ + uint32x4_t __rev0_487; __rev0_487 = __builtin_shufflevector(__s0_487, __s0_487, 3, 2, 1, 0); \ + uint16x4_t __rev1_487; __rev1_487 = __builtin_shufflevector(__s1_487, __s1_487, 3, 2, 1, 0); \ + uint16x8_t __rev2_487; __rev2_487 = __builtin_shufflevector(__s2_487, __s2_487, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_487 = __rev0_487 + __noswap_vmull_u16(__rev1_487, __noswap_splat_laneq_u16(__rev2_487, __p3_487)); \ + __ret_487 = __builtin_shufflevector(__ret_487, __ret_487, 3, 2, 1, 0); \ + __ret_487; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_s32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \ + int64x2_t __ret_488; \ + int64x2_t __s0_488 = __p0_488; \ + int32x2_t __s1_488 = __p1_488; \ + int32x4_t __s2_488 = __p2_488; \ + __ret_488 = __s0_488 + vmull_s32(__s1_488, splat_laneq_s32(__s2_488, __p3_488)); \ + __ret_488; \ +}) +#else +#define vmlal_laneq_s32(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \ + int64x2_t __ret_489; \ + int64x2_t __s0_489 = __p0_489; \ + int32x2_t __s1_489 = __p1_489; \ + int32x4_t __s2_489 = __p2_489; \ + int64x2_t __rev0_489; __rev0_489 = __builtin_shufflevector(__s0_489, __s0_489, 1, 0); \ + int32x2_t __rev1_489; __rev1_489 = __builtin_shufflevector(__s1_489, __s1_489, 1, 0); \ + int32x4_t __rev2_489; __rev2_489 = __builtin_shufflevector(__s2_489, __s2_489, 3, 2, 1, 0); \ + __ret_489 = __rev0_489 + __noswap_vmull_s32(__rev1_489, __noswap_splat_laneq_s32(__rev2_489, __p3_489)); \ + __ret_489 = __builtin_shufflevector(__ret_489, __ret_489, 1, 0); \ + __ret_489; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_laneq_s16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \ + int32x4_t __ret_490; \ + int32x4_t __s0_490 = __p0_490; \ + int16x4_t __s1_490 = __p1_490; \ + int16x8_t __s2_490 = __p2_490; \ + __ret_490 = __s0_490 + vmull_s16(__s1_490, splat_laneq_s16(__s2_490, __p3_490)); \ + __ret_490; \ +}) +#else +#define vmlal_laneq_s16(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \ + int32x4_t __ret_491; \ + int32x4_t __s0_491 = __p0_491; \ + int16x4_t __s1_491 = __p1_491; \ + int16x8_t __s2_491 = __p2_491; \ + int32x4_t __rev0_491; __rev0_491 = __builtin_shufflevector(__s0_491, __s0_491, 3, 2, 1, 0); \ + int16x4_t __rev1_491; __rev1_491 = __builtin_shufflevector(__s1_491, __s1_491, 3, 2, 1, 0); \ + int16x8_t __rev2_491; __rev2_491 = __builtin_shufflevector(__s2_491, __s2_491, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_491 = __rev0_491 + __noswap_vmull_s16(__rev1_491, __noswap_splat_laneq_s16(__rev2_491, __p3_491)); \ + __ret_491 = __builtin_shufflevector(__ret_491, __ret_491, 3, 2, 1, 0); \ + __ret_491; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_u32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \ + uint32x4_t __ret_492; \ + uint32x4_t __s0_492 = __p0_492; \ + uint32x4_t __s1_492 = __p1_492; \ + uint32x4_t __s2_492 = __p2_492; \ + __ret_492 = __s0_492 - __s1_492 * splatq_laneq_u32(__s2_492, __p3_492); \ + __ret_492; \ +}) +#else +#define vmlsq_laneq_u32(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \ + uint32x4_t __ret_493; \ + uint32x4_t __s0_493 = __p0_493; \ + uint32x4_t __s1_493 = __p1_493; \ + uint32x4_t __s2_493 = __p2_493; \ + uint32x4_t __rev0_493; __rev0_493 = __builtin_shufflevector(__s0_493, __s0_493, 3, 2, 1, 0); \ + uint32x4_t __rev1_493; __rev1_493 = __builtin_shufflevector(__s1_493, __s1_493, 3, 2, 1, 0); \ + uint32x4_t __rev2_493; __rev2_493 = __builtin_shufflevector(__s2_493, __s2_493, 3, 2, 1, 0); \ + __ret_493 = __rev0_493 - __rev1_493 * __noswap_splatq_laneq_u32(__rev2_493, __p3_493); \ + __ret_493 = __builtin_shufflevector(__ret_493, __ret_493, 3, 2, 1, 0); \ + __ret_493; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_u16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \ + uint16x8_t __ret_494; \ + uint16x8_t __s0_494 = __p0_494; \ + uint16x8_t __s1_494 = __p1_494; \ + uint16x8_t __s2_494 = __p2_494; \ + __ret_494 = __s0_494 - __s1_494 * splatq_laneq_u16(__s2_494, __p3_494); \ + __ret_494; \ +}) +#else +#define vmlsq_laneq_u16(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \ + uint16x8_t __ret_495; \ + uint16x8_t __s0_495 = __p0_495; \ + uint16x8_t __s1_495 = __p1_495; \ + uint16x8_t __s2_495 = __p2_495; \ + uint16x8_t __rev0_495; __rev0_495 = __builtin_shufflevector(__s0_495, __s0_495, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_495; __rev1_495 = __builtin_shufflevector(__s1_495, __s1_495, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_495; __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_495 = __rev0_495 - __rev1_495 * __noswap_splatq_laneq_u16(__rev2_495, __p3_495); \ + __ret_495 = __builtin_shufflevector(__ret_495, __ret_495, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_495; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_f32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \ + float32x4_t __ret_496; \ + float32x4_t __s0_496 = __p0_496; \ + float32x4_t __s1_496 = __p1_496; \ + float32x4_t __s2_496 = __p2_496; \ + __ret_496 = __s0_496 - __s1_496 * splatq_laneq_f32(__s2_496, __p3_496); \ + __ret_496; \ +}) +#else +#define vmlsq_laneq_f32(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \ + float32x4_t __ret_497; \ + float32x4_t __s0_497 = __p0_497; \ + float32x4_t __s1_497 = __p1_497; \ + float32x4_t __s2_497 = __p2_497; \ + float32x4_t __rev0_497; __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 3, 2, 1, 0); \ + float32x4_t __rev1_497; __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 3, 2, 1, 0); \ + float32x4_t __rev2_497; __rev2_497 = __builtin_shufflevector(__s2_497, __s2_497, 3, 2, 1, 0); \ + __ret_497 = __rev0_497 - __rev1_497 * __noswap_splatq_laneq_f32(__rev2_497, __p3_497); \ + __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 3, 2, 1, 0); \ + __ret_497; \ +}) +#endif 
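/*
 * Usage sketch (not part of the vendored header, added for illustration):
 * the intrinsics defined above are consumed like any other <arm_neon.h>
 * API. A minimal example, assuming an aarch64 target, that combines the
 * lane-indexed multiply-accumulate vmlaq_laneq_f32 with the horizontal
 * reduction vmaxvq_f32, both of which appear in this hunk; the helper
 * name demo_mla_max is hypothetical. Note how callers never see the
 * big-endian lane reversal: the #else branches above reverse lanes
 * before and after the builtin, so lane indices mean the same element
 * on either endianness.
 */
#include <arm_neon.h>

static float demo_mla_max(const float *acc, const float *a, const float *b) {
  float32x4_t vacc = vld1q_f32(acc);        /* load 4 accumulator floats */
  float32x4_t va = vld1q_f32(a);
  float32x4_t vb = vld1q_f32(b);
  vacc = vmlaq_laneq_f32(vacc, va, vb, 1);  /* vacc += va * vb[lane 1] */
  return vmaxvq_f32(vacc);                  /* horizontal maximum of vacc */
}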
+ +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_s32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \ + int32x4_t __ret_498; \ + int32x4_t __s0_498 = __p0_498; \ + int32x4_t __s1_498 = __p1_498; \ + int32x4_t __s2_498 = __p2_498; \ + __ret_498 = __s0_498 - __s1_498 * splatq_laneq_s32(__s2_498, __p3_498); \ + __ret_498; \ +}) +#else +#define vmlsq_laneq_s32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \ + int32x4_t __ret_499; \ + int32x4_t __s0_499 = __p0_499; \ + int32x4_t __s1_499 = __p1_499; \ + int32x4_t __s2_499 = __p2_499; \ + int32x4_t __rev0_499; __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \ + int32x4_t __rev1_499; __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \ + int32x4_t __rev2_499; __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 3, 2, 1, 0); \ + __ret_499 = __rev0_499 - __rev1_499 * __noswap_splatq_laneq_s32(__rev2_499, __p3_499); \ + __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \ + __ret_499; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_s16(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \ + int16x8_t __ret_500; \ + int16x8_t __s0_500 = __p0_500; \ + int16x8_t __s1_500 = __p1_500; \ + int16x8_t __s2_500 = __p2_500; \ + __ret_500 = __s0_500 - __s1_500 * splatq_laneq_s16(__s2_500, __p3_500); \ + __ret_500; \ +}) +#else +#define vmlsq_laneq_s16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \ + int16x8_t __ret_501; \ + int16x8_t __s0_501 = __p0_501; \ + int16x8_t __s1_501 = __p1_501; \ + int16x8_t __s2_501 = __p2_501; \ + int16x8_t __rev0_501; __rev0_501 = __builtin_shufflevector(__s0_501, __s0_501, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_501; __rev1_501 = __builtin_shufflevector(__s1_501, __s1_501, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_501; __rev2_501 = __builtin_shufflevector(__s2_501, __s2_501, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_501 = __rev0_501 - __rev1_501 * __noswap_splatq_laneq_s16(__rev2_501, __p3_501); \ + __ret_501 = __builtin_shufflevector(__ret_501, __ret_501, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_501; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_u32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \ + uint32x2_t __ret_502; \ + uint32x2_t __s0_502 = __p0_502; \ + uint32x2_t __s1_502 = __p1_502; \ + uint32x4_t __s2_502 = __p2_502; \ + __ret_502 = __s0_502 - __s1_502 * splat_laneq_u32(__s2_502, __p3_502); \ + __ret_502; \ +}) +#else +#define vmls_laneq_u32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \ + uint32x2_t __ret_503; \ + uint32x2_t __s0_503 = __p0_503; \ + uint32x2_t __s1_503 = __p1_503; \ + uint32x4_t __s2_503 = __p2_503; \ + uint32x2_t __rev0_503; __rev0_503 = __builtin_shufflevector(__s0_503, __s0_503, 1, 0); \ + uint32x2_t __rev1_503; __rev1_503 = __builtin_shufflevector(__s1_503, __s1_503, 1, 0); \ + uint32x4_t __rev2_503; __rev2_503 = __builtin_shufflevector(__s2_503, __s2_503, 3, 2, 1, 0); \ + __ret_503 = __rev0_503 - __rev1_503 * __noswap_splat_laneq_u32(__rev2_503, __p3_503); \ + __ret_503 = __builtin_shufflevector(__ret_503, __ret_503, 1, 0); \ + __ret_503; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_u16(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \ + uint16x4_t __ret_504; \ + uint16x4_t __s0_504 = __p0_504; \ + uint16x4_t __s1_504 = __p1_504; \ + uint16x8_t __s2_504 = __p2_504; \ + __ret_504 = __s0_504 - __s1_504 * splat_laneq_u16(__s2_504, __p3_504); \ + __ret_504; \ +}) +#else +#define vmls_laneq_u16(__p0_505, __p1_505, __p2_505, 
__p3_505) __extension__ ({ \ + uint16x4_t __ret_505; \ + uint16x4_t __s0_505 = __p0_505; \ + uint16x4_t __s1_505 = __p1_505; \ + uint16x8_t __s2_505 = __p2_505; \ + uint16x4_t __rev0_505; __rev0_505 = __builtin_shufflevector(__s0_505, __s0_505, 3, 2, 1, 0); \ + uint16x4_t __rev1_505; __rev1_505 = __builtin_shufflevector(__s1_505, __s1_505, 3, 2, 1, 0); \ + uint16x8_t __rev2_505; __rev2_505 = __builtin_shufflevector(__s2_505, __s2_505, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_505 = __rev0_505 - __rev1_505 * __noswap_splat_laneq_u16(__rev2_505, __p3_505); \ + __ret_505 = __builtin_shufflevector(__ret_505, __ret_505, 3, 2, 1, 0); \ + __ret_505; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \ + float32x2_t __ret_506; \ + float32x2_t __s0_506 = __p0_506; \ + float32x2_t __s1_506 = __p1_506; \ + float32x4_t __s2_506 = __p2_506; \ + __ret_506 = __s0_506 - __s1_506 * splat_laneq_f32(__s2_506, __p3_506); \ + __ret_506; \ +}) +#else +#define vmls_laneq_f32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \ + float32x2_t __ret_507; \ + float32x2_t __s0_507 = __p0_507; \ + float32x2_t __s1_507 = __p1_507; \ + float32x4_t __s2_507 = __p2_507; \ + float32x2_t __rev0_507; __rev0_507 = __builtin_shufflevector(__s0_507, __s0_507, 1, 0); \ + float32x2_t __rev1_507; __rev1_507 = __builtin_shufflevector(__s1_507, __s1_507, 1, 0); \ + float32x4_t __rev2_507; __rev2_507 = __builtin_shufflevector(__s2_507, __s2_507, 3, 2, 1, 0); \ + __ret_507 = __rev0_507 - __rev1_507 * __noswap_splat_laneq_f32(__rev2_507, __p3_507); \ + __ret_507 = __builtin_shufflevector(__ret_507, __ret_507, 1, 0); \ + __ret_507; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_s32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \ + int32x2_t __ret_508; \ + int32x2_t __s0_508 = __p0_508; \ + int32x2_t __s1_508 = __p1_508; \ + int32x4_t __s2_508 = __p2_508; \ + __ret_508 = __s0_508 - __s1_508 * splat_laneq_s32(__s2_508, __p3_508); \ + __ret_508; \ +}) +#else +#define vmls_laneq_s32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \ + int32x2_t __ret_509; \ + int32x2_t __s0_509 = __p0_509; \ + int32x2_t __s1_509 = __p1_509; \ + int32x4_t __s2_509 = __p2_509; \ + int32x2_t __rev0_509; __rev0_509 = __builtin_shufflevector(__s0_509, __s0_509, 1, 0); \ + int32x2_t __rev1_509; __rev1_509 = __builtin_shufflevector(__s1_509, __s1_509, 1, 0); \ + int32x4_t __rev2_509; __rev2_509 = __builtin_shufflevector(__s2_509, __s2_509, 3, 2, 1, 0); \ + __ret_509 = __rev0_509 - __rev1_509 * __noswap_splat_laneq_s32(__rev2_509, __p3_509); \ + __ret_509 = __builtin_shufflevector(__ret_509, __ret_509, 1, 0); \ + __ret_509; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_laneq_s16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \ + int16x4_t __ret_510; \ + int16x4_t __s0_510 = __p0_510; \ + int16x4_t __s1_510 = __p1_510; \ + int16x8_t __s2_510 = __p2_510; \ + __ret_510 = __s0_510 - __s1_510 * splat_laneq_s16(__s2_510, __p3_510); \ + __ret_510; \ +}) +#else +#define vmls_laneq_s16(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \ + int16x4_t __ret_511; \ + int16x4_t __s0_511 = __p0_511; \ + int16x4_t __s1_511 = __p1_511; \ + int16x8_t __s2_511 = __p2_511; \ + int16x4_t __rev0_511; __rev0_511 = __builtin_shufflevector(__s0_511, __s0_511, 3, 2, 1, 0); \ + int16x4_t __rev1_511; __rev1_511 = __builtin_shufflevector(__s1_511, __s1_511, 3, 2, 1, 0); \ + int16x8_t __rev2_511; __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, 7, 6, 5, 4, 
3, 2, 1, 0); \ + __ret_511 = __rev0_511 - __rev1_511 * __noswap_splat_laneq_s16(__rev2_511, __p3_511); \ + __ret_511 = __builtin_shufflevector(__ret_511, __ret_511, 3, 2, 1, 0); \ + __ret_511; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_u32(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \ + uint64x2_t __ret_512; \ + uint64x2_t __s0_512 = __p0_512; \ + uint32x4_t __s1_512 = __p1_512; \ + uint32x2_t __s2_512 = __p2_512; \ + __ret_512 = __s0_512 - vmull_u32(vget_high_u32(__s1_512), splat_lane_u32(__s2_512, __p3_512)); \ + __ret_512; \ +}) +#else +#define vmlsl_high_lane_u32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \ + uint64x2_t __ret_513; \ + uint64x2_t __s0_513 = __p0_513; \ + uint32x4_t __s1_513 = __p1_513; \ + uint32x2_t __s2_513 = __p2_513; \ + uint64x2_t __rev0_513; __rev0_513 = __builtin_shufflevector(__s0_513, __s0_513, 1, 0); \ + uint32x4_t __rev1_513; __rev1_513 = __builtin_shufflevector(__s1_513, __s1_513, 3, 2, 1, 0); \ + uint32x2_t __rev2_513; __rev2_513 = __builtin_shufflevector(__s2_513, __s2_513, 1, 0); \ + __ret_513 = __rev0_513 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_513), __noswap_splat_lane_u32(__rev2_513, __p3_513)); \ + __ret_513 = __builtin_shufflevector(__ret_513, __ret_513, 1, 0); \ + __ret_513; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_u16(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \ + uint32x4_t __ret_514; \ + uint32x4_t __s0_514 = __p0_514; \ + uint16x8_t __s1_514 = __p1_514; \ + uint16x4_t __s2_514 = __p2_514; \ + __ret_514 = __s0_514 - vmull_u16(vget_high_u16(__s1_514), splat_lane_u16(__s2_514, __p3_514)); \ + __ret_514; \ +}) +#else +#define vmlsl_high_lane_u16(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \ + uint32x4_t __ret_515; \ + uint32x4_t __s0_515 = __p0_515; \ + uint16x8_t __s1_515 = __p1_515; \ + uint16x4_t __s2_515 = __p2_515; \ + uint32x4_t __rev0_515; __rev0_515 = __builtin_shufflevector(__s0_515, __s0_515, 3, 2, 1, 0); \ + uint16x8_t __rev1_515; __rev1_515 = __builtin_shufflevector(__s1_515, __s1_515, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_515; __rev2_515 = __builtin_shufflevector(__s2_515, __s2_515, 3, 2, 1, 0); \ + __ret_515 = __rev0_515 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_515), __noswap_splat_lane_u16(__rev2_515, __p3_515)); \ + __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 3, 2, 1, 0); \ + __ret_515; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_lane_s32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \ + int64x2_t __ret_516; \ + int64x2_t __s0_516 = __p0_516; \ + int32x4_t __s1_516 = __p1_516; \ + int32x2_t __s2_516 = __p2_516; \ + __ret_516 = __s0_516 - vmull_s32(vget_high_s32(__s1_516), splat_lane_s32(__s2_516, __p3_516)); \ + __ret_516; \ +}) +#else +#define vmlsl_high_lane_s32(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \ + int64x2_t __ret_517; \ + int64x2_t __s0_517 = __p0_517; \ + int32x4_t __s1_517 = __p1_517; \ + int32x2_t __s2_517 = __p2_517; \ + int64x2_t __rev0_517; __rev0_517 = __builtin_shufflevector(__s0_517, __s0_517, 1, 0); \ + int32x4_t __rev1_517; __rev1_517 = __builtin_shufflevector(__s1_517, __s1_517, 3, 2, 1, 0); \ + int32x2_t __rev2_517; __rev2_517 = __builtin_shufflevector(__s2_517, __s2_517, 1, 0); \ + __ret_517 = __rev0_517 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_517), __noswap_splat_lane_s32(__rev2_517, __p3_517)); \ + __ret_517 = __builtin_shufflevector(__ret_517, __ret_517, 1, 0); \ + __ret_517; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vmlsl_high_lane_s16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \ + int32x4_t __ret_518; \ + int32x4_t __s0_518 = __p0_518; \ + int16x8_t __s1_518 = __p1_518; \ + int16x4_t __s2_518 = __p2_518; \ + __ret_518 = __s0_518 - vmull_s16(vget_high_s16(__s1_518), splat_lane_s16(__s2_518, __p3_518)); \ + __ret_518; \ +}) +#else +#define vmlsl_high_lane_s16(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \ + int32x4_t __ret_519; \ + int32x4_t __s0_519 = __p0_519; \ + int16x8_t __s1_519 = __p1_519; \ + int16x4_t __s2_519 = __p2_519; \ + int32x4_t __rev0_519; __rev0_519 = __builtin_shufflevector(__s0_519, __s0_519, 3, 2, 1, 0); \ + int16x8_t __rev1_519; __rev1_519 = __builtin_shufflevector(__s1_519, __s1_519, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_519; __rev2_519 = __builtin_shufflevector(__s2_519, __s2_519, 3, 2, 1, 0); \ + __ret_519 = __rev0_519 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_519), __noswap_splat_lane_s16(__rev2_519, __p3_519)); \ + __ret_519 = __builtin_shufflevector(__ret_519, __ret_519, 3, 2, 1, 0); \ + __ret_519; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_u32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \ + uint64x2_t __ret_520; \ + uint64x2_t __s0_520 = __p0_520; \ + uint32x4_t __s1_520 = __p1_520; \ + uint32x4_t __s2_520 = __p2_520; \ + __ret_520 = __s0_520 - vmull_u32(vget_high_u32(__s1_520), splat_laneq_u32(__s2_520, __p3_520)); \ + __ret_520; \ +}) +#else +#define vmlsl_high_laneq_u32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \ + uint64x2_t __ret_521; \ + uint64x2_t __s0_521 = __p0_521; \ + uint32x4_t __s1_521 = __p1_521; \ + uint32x4_t __s2_521 = __p2_521; \ + uint64x2_t __rev0_521; __rev0_521 = __builtin_shufflevector(__s0_521, __s0_521, 1, 0); \ + uint32x4_t __rev1_521; __rev1_521 = __builtin_shufflevector(__s1_521, __s1_521, 3, 2, 1, 0); \ + uint32x4_t __rev2_521; __rev2_521 = __builtin_shufflevector(__s2_521, __s2_521, 3, 2, 1, 0); \ + __ret_521 = __rev0_521 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_521), __noswap_splat_laneq_u32(__rev2_521, __p3_521)); \ + __ret_521 = __builtin_shufflevector(__ret_521, __ret_521, 1, 0); \ + __ret_521; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_u16(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \ + uint32x4_t __ret_522; \ + uint32x4_t __s0_522 = __p0_522; \ + uint16x8_t __s1_522 = __p1_522; \ + uint16x8_t __s2_522 = __p2_522; \ + __ret_522 = __s0_522 - vmull_u16(vget_high_u16(__s1_522), splat_laneq_u16(__s2_522, __p3_522)); \ + __ret_522; \ +}) +#else +#define vmlsl_high_laneq_u16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \ + uint32x4_t __ret_523; \ + uint32x4_t __s0_523 = __p0_523; \ + uint16x8_t __s1_523 = __p1_523; \ + uint16x8_t __s2_523 = __p2_523; \ + uint32x4_t __rev0_523; __rev0_523 = __builtin_shufflevector(__s0_523, __s0_523, 3, 2, 1, 0); \ + uint16x8_t __rev1_523; __rev1_523 = __builtin_shufflevector(__s1_523, __s1_523, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_523; __rev2_523 = __builtin_shufflevector(__s2_523, __s2_523, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_523 = __rev0_523 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_523), __noswap_splat_laneq_u16(__rev2_523, __p3_523)); \ + __ret_523 = __builtin_shufflevector(__ret_523, __ret_523, 3, 2, 1, 0); \ + __ret_523; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s32(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \ + int64x2_t __ret_524; \ + int64x2_t __s0_524 = __p0_524; \ 
+ int32x4_t __s1_524 = __p1_524; \ + int32x4_t __s2_524 = __p2_524; \ + __ret_524 = __s0_524 - vmull_s32(vget_high_s32(__s1_524), splat_laneq_s32(__s2_524, __p3_524)); \ + __ret_524; \ +}) +#else +#define vmlsl_high_laneq_s32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \ + int64x2_t __ret_525; \ + int64x2_t __s0_525 = __p0_525; \ + int32x4_t __s1_525 = __p1_525; \ + int32x4_t __s2_525 = __p2_525; \ + int64x2_t __rev0_525; __rev0_525 = __builtin_shufflevector(__s0_525, __s0_525, 1, 0); \ + int32x4_t __rev1_525; __rev1_525 = __builtin_shufflevector(__s1_525, __s1_525, 3, 2, 1, 0); \ + int32x4_t __rev2_525; __rev2_525 = __builtin_shufflevector(__s2_525, __s2_525, 3, 2, 1, 0); \ + __ret_525 = __rev0_525 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_525), __noswap_splat_laneq_s32(__rev2_525, __p3_525)); \ + __ret_525 = __builtin_shufflevector(__ret_525, __ret_525, 1, 0); \ + __ret_525; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_high_laneq_s16(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \ + int32x4_t __ret_526; \ + int32x4_t __s0_526 = __p0_526; \ + int16x8_t __s1_526 = __p1_526; \ + int16x8_t __s2_526 = __p2_526; \ + __ret_526 = __s0_526 - vmull_s16(vget_high_s16(__s1_526), splat_laneq_s16(__s2_526, __p3_526)); \ + __ret_526; \ +}) +#else +#define vmlsl_high_laneq_s16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \ + int32x4_t __ret_527; \ + int32x4_t __s0_527 = __p0_527; \ + int16x8_t __s1_527 = __p1_527; \ + int16x8_t __s2_527 = __p2_527; \ + int32x4_t __rev0_527; __rev0_527 = __builtin_shufflevector(__s0_527, __s0_527, 3, 2, 1, 0); \ + int16x8_t __rev1_527; __rev1_527 = __builtin_shufflevector(__s1_527, __s1_527, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_527; __rev2_527 = __builtin_shufflevector(__s2_527, __s2_527, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_527 = __rev0_527 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_527), __noswap_splat_laneq_s16(__rev2_527, __p3_527)); \ + __ret_527 = __builtin_shufflevector(__ret_527, __ret_527, 3, 2, 1, 0); \ + __ret_527; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u32(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \ + uint64x2_t __ret_528; \ + uint64x2_t __s0_528 = __p0_528; \ + uint32x2_t __s1_528 = __p1_528; \ + uint32x4_t __s2_528 = __p2_528; \ + __ret_528 = __s0_528 - vmull_u32(__s1_528, splat_laneq_u32(__s2_528, __p3_528)); \ + __ret_528; \ +}) +#else +#define vmlsl_laneq_u32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \ + uint64x2_t __ret_529; \ + uint64x2_t __s0_529 = __p0_529; \ + uint32x2_t __s1_529 = __p1_529; \ + uint32x4_t __s2_529 = __p2_529; \ + uint64x2_t __rev0_529; __rev0_529 = __builtin_shufflevector(__s0_529, __s0_529, 1, 0); \ + uint32x2_t __rev1_529; __rev1_529 = __builtin_shufflevector(__s1_529, __s1_529, 1, 0); \ + uint32x4_t __rev2_529; __rev2_529 = __builtin_shufflevector(__s2_529, __s2_529, 3, 2, 1, 0); \ + __ret_529 = __rev0_529 - __noswap_vmull_u32(__rev1_529, __noswap_splat_laneq_u32(__rev2_529, __p3_529)); \ + __ret_529 = __builtin_shufflevector(__ret_529, __ret_529, 1, 0); \ + __ret_529; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u16(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \ + uint32x4_t __ret_530; \ + uint32x4_t __s0_530 = __p0_530; \ + uint16x4_t __s1_530 = __p1_530; \ + uint16x8_t __s2_530 = __p2_530; \ + __ret_530 = __s0_530 - vmull_u16(__s1_530, splat_laneq_u16(__s2_530, __p3_530)); \ + __ret_530; \ +}) +#else +#define vmlsl_laneq_u16(__p0_531, __p1_531, __p2_531, __p3_531) 
__extension__ ({ \ + uint32x4_t __ret_531; \ + uint32x4_t __s0_531 = __p0_531; \ + uint16x4_t __s1_531 = __p1_531; \ + uint16x8_t __s2_531 = __p2_531; \ + uint32x4_t __rev0_531; __rev0_531 = __builtin_shufflevector(__s0_531, __s0_531, 3, 2, 1, 0); \ + uint16x4_t __rev1_531; __rev1_531 = __builtin_shufflevector(__s1_531, __s1_531, 3, 2, 1, 0); \ + uint16x8_t __rev2_531; __rev2_531 = __builtin_shufflevector(__s2_531, __s2_531, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_531 = __rev0_531 - __noswap_vmull_u16(__rev1_531, __noswap_splat_laneq_u16(__rev2_531, __p3_531)); \ + __ret_531 = __builtin_shufflevector(__ret_531, __ret_531, 3, 2, 1, 0); \ + __ret_531; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \ + int64x2_t __ret_532; \ + int64x2_t __s0_532 = __p0_532; \ + int32x2_t __s1_532 = __p1_532; \ + int32x4_t __s2_532 = __p2_532; \ + __ret_532 = __s0_532 - vmull_s32(__s1_532, splat_laneq_s32(__s2_532, __p3_532)); \ + __ret_532; \ +}) +#else +#define vmlsl_laneq_s32(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \ + int64x2_t __ret_533; \ + int64x2_t __s0_533 = __p0_533; \ + int32x2_t __s1_533 = __p1_533; \ + int32x4_t __s2_533 = __p2_533; \ + int64x2_t __rev0_533; __rev0_533 = __builtin_shufflevector(__s0_533, __s0_533, 1, 0); \ + int32x2_t __rev1_533; __rev1_533 = __builtin_shufflevector(__s1_533, __s1_533, 1, 0); \ + int32x4_t __rev2_533; __rev2_533 = __builtin_shufflevector(__s2_533, __s2_533, 3, 2, 1, 0); \ + __ret_533 = __rev0_533 - __noswap_vmull_s32(__rev1_533, __noswap_splat_laneq_s32(__rev2_533, __p3_533)); \ + __ret_533 = __builtin_shufflevector(__ret_533, __ret_533, 1, 0); \ + __ret_533; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \ + int32x4_t __ret_534; \ + int32x4_t __s0_534 = __p0_534; \ + int16x4_t __s1_534 = __p1_534; \ + int16x8_t __s2_534 = __p2_534; \ + __ret_534 = __s0_534 - vmull_s16(__s1_534, splat_laneq_s16(__s2_534, __p3_534)); \ + __ret_534; \ +}) +#else +#define vmlsl_laneq_s16(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \ + int32x4_t __ret_535; \ + int32x4_t __s0_535 = __p0_535; \ + int16x4_t __s1_535 = __p1_535; \ + int16x8_t __s2_535 = __p2_535; \ + int32x4_t __rev0_535; __rev0_535 = __builtin_shufflevector(__s0_535, __s0_535, 3, 2, 1, 0); \ + int16x4_t __rev1_535; __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 3, 2, 1, 0); \ + int16x8_t __rev2_535; __rev2_535 = __builtin_shufflevector(__s2_535, __s2_535, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_535 = __rev0_535 - __noswap_vmull_s16(__rev1_535, __noswap_splat_laneq_s16(__rev2_535, __p3_535)); \ + __ret_535 = __builtin_shufflevector(__ret_535, __ret_535, 3, 2, 1, 0); \ + __ret_535; \ +}) +#endif + +__ai __attribute__((target("neon"))) poly64x1_t vmov_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vmovq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vmovq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmovq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) float64x2_t vmovq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmov_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmovl_high_u8(uint8x16_t __p0_536) { + uint16x8_t __ret_536; + uint8x8_t __a1_536 = vget_high_u8(__p0_536); + __ret_536 = (uint16x8_t)(vshll_n_u8(__a1_536, 0)); + return __ret_536; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmovl_high_u8(uint8x16_t __p0_537) { + uint16x8_t __ret_537; + uint8x16_t __rev0_537; __rev0_537 = __builtin_shufflevector(__p0_537, __p0_537, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __a1_537 = __noswap_vget_high_u8(__rev0_537); + __ret_537 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_537, 0)); + __ret_537 = __builtin_shufflevector(__ret_537, __ret_537, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_537; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_538) { + uint16x8_t __ret_538; + uint8x8_t __a1_538 = __noswap_vget_high_u8(__p0_538); + __ret_538 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_538, 0)); + return __ret_538; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmovl_high_u32(uint32x4_t __p0_539) { + uint64x2_t __ret_539; + uint32x2_t __a1_539 = vget_high_u32(__p0_539); + __ret_539 = (uint64x2_t)(vshll_n_u32(__a1_539, 0)); + return __ret_539; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmovl_high_u32(uint32x4_t __p0_540) { + uint64x2_t __ret_540; + uint32x4_t __rev0_540; __rev0_540 = __builtin_shufflevector(__p0_540, __p0_540, 3, 2, 1, 0); + uint32x2_t __a1_540 = __noswap_vget_high_u32(__rev0_540); + __ret_540 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_540, 0)); + __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); + return __ret_540; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_541) { + uint64x2_t __ret_541; + uint32x2_t __a1_541 = __noswap_vget_high_u32(__p0_541); + __ret_541 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_541, 0)); + return __ret_541; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmovl_high_u16(uint16x8_t __p0_542) { + uint32x4_t __ret_542; + uint16x4_t __a1_542 = vget_high_u16(__p0_542); + __ret_542 = (uint32x4_t)(vshll_n_u16(__a1_542, 0)); + return __ret_542; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmovl_high_u16(uint16x8_t __p0_543) { + uint32x4_t __ret_543; + uint16x8_t __rev0_543; __rev0_543 = __builtin_shufflevector(__p0_543, __p0_543, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __a1_543 = __noswap_vget_high_u16(__rev0_543); + __ret_543 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_543, 0)); + __ret_543 = __builtin_shufflevector(__ret_543, __ret_543, 3, 2, 1, 0); + return __ret_543; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_544) { + uint32x4_t __ret_544; + uint16x4_t __a1_544 = __noswap_vget_high_u16(__p0_544); + __ret_544 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_544, 0)); + return __ret_544; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmovl_high_s8(int8x16_t __p0_545) { + int16x8_t __ret_545; + int8x8_t __a1_545 = vget_high_s8(__p0_545); + __ret_545 = (int16x8_t)(vshll_n_s8(__a1_545, 0)); 
+ return __ret_545; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmovl_high_s8(int8x16_t __p0_546) { + int16x8_t __ret_546; + int8x16_t __rev0_546; __rev0_546 = __builtin_shufflevector(__p0_546, __p0_546, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __a1_546 = __noswap_vget_high_s8(__rev0_546); + __ret_546 = (int16x8_t)(__noswap_vshll_n_s8(__a1_546, 0)); + __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_546; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_547) { + int16x8_t __ret_547; + int8x8_t __a1_547 = __noswap_vget_high_s8(__p0_547); + __ret_547 = (int16x8_t)(__noswap_vshll_n_s8(__a1_547, 0)); + return __ret_547; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmovl_high_s32(int32x4_t __p0_548) { + int64x2_t __ret_548; + int32x2_t __a1_548 = vget_high_s32(__p0_548); + __ret_548 = (int64x2_t)(vshll_n_s32(__a1_548, 0)); + return __ret_548; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmovl_high_s32(int32x4_t __p0_549) { + int64x2_t __ret_549; + int32x4_t __rev0_549; __rev0_549 = __builtin_shufflevector(__p0_549, __p0_549, 3, 2, 1, 0); + int32x2_t __a1_549 = __noswap_vget_high_s32(__rev0_549); + __ret_549 = (int64x2_t)(__noswap_vshll_n_s32(__a1_549, 0)); + __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 1, 0); + return __ret_549; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_550) { + int64x2_t __ret_550; + int32x2_t __a1_550 = __noswap_vget_high_s32(__p0_550); + __ret_550 = (int64x2_t)(__noswap_vshll_n_s32(__a1_550, 0)); + return __ret_550; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmovl_high_s16(int16x8_t __p0_551) { + int32x4_t __ret_551; + int16x4_t __a1_551 = vget_high_s16(__p0_551); + __ret_551 = (int32x4_t)(vshll_n_s16(__a1_551, 0)); + return __ret_551; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmovl_high_s16(int16x8_t __p0_552) { + int32x4_t __ret_552; + int16x8_t __rev0_552; __rev0_552 = __builtin_shufflevector(__p0_552, __p0_552, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __a1_552 = __noswap_vget_high_s16(__rev0_552); + __ret_552 = (int32x4_t)(__noswap_vshll_n_s16(__a1_552, 0)); + __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 3, 2, 1, 0); + return __ret_552; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_553) { + int32x4_t __ret_553; + int16x4_t __a1_553 = __noswap_vget_high_s16(__p0_553); + __ret_553 = (int32x4_t)(__noswap_vshll_n_s16(__a1_553, 0)); + return __ret_553; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vmovn_u32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vmovn_u64(__p1)); + return __ret; 
+} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vmovn_u16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vmovn_s32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vmovn_s64(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vmovn_s16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#define vmuld_lane_f64(__p0_554, __p1_554, __p2_554) __extension__ ({ \ + float64_t __ret_554; \ + float64_t __s0_554 = __p0_554; \ + float64x1_t __s1_554 = __p1_554; \ + __ret_554 = __s0_554 * vget_lane_f64(__s1_554, __p2_554); \ + __ret_554; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmuls_lane_f32(__p0_555, __p1_555, __p2_555) __extension__ ({ \ + float32_t __ret_555; \ + float32_t __s0_555 = __p0_555; \ + float32x2_t __s1_555 = __p1_555; \ + __ret_555 = __s0_555 * vget_lane_f32(__s1_555, __p2_555); \ + __ret_555; \ +}) +#else +#define vmuls_lane_f32(__p0_556, __p1_556, __p2_556) __extension__ ({ \ + float32_t __ret_556; \ + float32_t __s0_556 = __p0_556; \ + float32x2_t __s1_556 = __p1_556; \ + float32x2_t __rev1_556; __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \ + __ret_556 = __s0_556 * __noswap_vget_lane_f32(__rev1_556, __p2_556); \ + __ret_556; \ +}) +#endif + +#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f64(__p0_557, __p1_557, __p2_557) __extension__ ({ \ + float64x2_t __ret_557; \ + float64x2_t __s0_557 = __p0_557; \ + float64x1_t __s1_557 = __p1_557; \ + __ret_557 = __s0_557 * splatq_lane_f64(__s1_557, __p2_557); \ + __ret_557; \ +}) +#else +#define vmulq_lane_f64(__p0_558, __p1_558, __p2_558) __extension__ ({ \ + float64x2_t __ret_558; \ + float64x2_t __s0_558 = __p0_558; \ + float64x1_t __s1_558 = __p1_558; \ + float64x2_t __rev0_558; __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 1, 0); \ + __ret_558 = __rev0_558 * __noswap_splatq_lane_f64(__s1_558, __p2_558); \ + __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 1, 0); \ + __ret_558; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuld_laneq_f64(__p0_559, __p1_559, __p2_559) __extension__ ({ \ + float64_t __ret_559; \ + float64_t __s0_559 = __p0_559; \ + float64x2_t __s1_559 = __p1_559; \ + __ret_559 = __s0_559 * vgetq_lane_f64(__s1_559, __p2_559); \ + __ret_559; \ +}) +#else +#define vmuld_laneq_f64(__p0_560, __p1_560, __p2_560) __extension__ ({ \ + float64_t __ret_560; \ + float64_t __s0_560 = __p0_560; \ + float64x2_t __s1_560 = __p1_560; \ + float64x2_t __rev1_560; __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 1, 0); \ + __ret_560 = __s0_560 * __noswap_vgetq_lane_f64(__rev1_560, __p2_560); \ + __ret_560; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmuls_laneq_f32(__p0_561, __p1_561, __p2_561) __extension__ ({ \ + float32_t __ret_561; \ + float32_t __s0_561 = __p0_561; \ + float32x4_t __s1_561 = __p1_561; \ + __ret_561 = __s0_561 * vgetq_lane_f32(__s1_561, __p2_561); \ + __ret_561; \ +}) +#else +#define vmuls_laneq_f32(__p0_562, __p1_562, __p2_562) __extension__ ({ \ + float32_t __ret_562; \ + float32_t __s0_562 = __p0_562; \ + float32x4_t __s1_562 = __p1_562; \ + float32x4_t __rev1_562; __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 3, 2, 1, 0); \ + __ret_562 = __s0_562 * __noswap_vgetq_lane_f32(__rev1_562, __p2_562); \ + __ret_562; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ 
+#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \ + __ret; \ +}) +#else +#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_u32(__p0_563, __p1_563, __p2_563) __extension__ ({ \ + uint32x4_t __ret_563; \ + uint32x4_t __s0_563 = __p0_563; \ + uint32x4_t __s1_563 = __p1_563; \ + __ret_563 = __s0_563 * splatq_laneq_u32(__s1_563, __p2_563); \ + __ret_563; \ +}) +#else +#define vmulq_laneq_u32(__p0_564, __p1_564, __p2_564) __extension__ ({ \ + uint32x4_t __ret_564; \ + uint32x4_t __s0_564 = __p0_564; \ + uint32x4_t __s1_564 = __p1_564; \ + uint32x4_t __rev0_564; __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \ + uint32x4_t __rev1_564; __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \ + __ret_564 = __rev0_564 * __noswap_splatq_laneq_u32(__rev1_564, __p2_564); \ + __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \ + __ret_564; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_u16(__p0_565, __p1_565, __p2_565) __extension__ ({ \ + uint16x8_t __ret_565; \ + uint16x8_t __s0_565 = __p0_565; \ + uint16x8_t __s1_565 = __p1_565; \ + __ret_565 = __s0_565 * splatq_laneq_u16(__s1_565, __p2_565); \ + __ret_565; \ +}) +#else +#define vmulq_laneq_u16(__p0_566, __p1_566, __p2_566) __extension__ ({ \ + uint16x8_t __ret_566; \ + uint16x8_t __s0_566 = __p0_566; \ + uint16x8_t __s1_566 = __p1_566; \ + uint16x8_t __rev0_566; __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_566; __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_566 = __rev0_566 * __noswap_splatq_laneq_u16(__rev1_566, __p2_566); \ + __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_566; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f64(__p0_567, __p1_567, __p2_567) __extension__ ({ \ + float64x2_t __ret_567; \ + float64x2_t __s0_567 = __p0_567; \ + float64x2_t __s1_567 = __p1_567; \ + __ret_567 = __s0_567 * splatq_laneq_f64(__s1_567, __p2_567); \ + __ret_567; \ +}) +#else +#define vmulq_laneq_f64(__p0_568, __p1_568, __p2_568) __extension__ ({ \ + float64x2_t __ret_568; \ + float64x2_t __s0_568 = __p0_568; \ + float64x2_t __s1_568 = __p1_568; \ + float64x2_t __rev0_568; __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 1, 0); \ + float64x2_t __rev1_568; __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 1, 0); \ + __ret_568 = __rev0_568 * __noswap_splatq_laneq_f64(__rev1_568, __p2_568); \ + __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 1, 0); \ + __ret_568; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_f32(__p0_569, __p1_569, __p2_569) __extension__ ({ \ + float32x4_t __ret_569; \ + float32x4_t __s0_569 = __p0_569; \ + float32x4_t __s1_569 = __p1_569; \ + __ret_569 = __s0_569 * splatq_laneq_f32(__s1_569, __p2_569); \ + __ret_569; \ +}) +#else +#define vmulq_laneq_f32(__p0_570, __p1_570, __p2_570) __extension__ ({ \ + float32x4_t __ret_570; \ + float32x4_t 
__s0_570 = __p0_570; \ + float32x4_t __s1_570 = __p1_570; \ + float32x4_t __rev0_570; __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 3, 2, 1, 0); \ + float32x4_t __rev1_570; __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 3, 2, 1, 0); \ + __ret_570 = __rev0_570 * __noswap_splatq_laneq_f32(__rev1_570, __p2_570); \ + __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 3, 2, 1, 0); \ + __ret_570; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s32(__p0_571, __p1_571, __p2_571) __extension__ ({ \ + int32x4_t __ret_571; \ + int32x4_t __s0_571 = __p0_571; \ + int32x4_t __s1_571 = __p1_571; \ + __ret_571 = __s0_571 * splatq_laneq_s32(__s1_571, __p2_571); \ + __ret_571; \ +}) +#else +#define vmulq_laneq_s32(__p0_572, __p1_572, __p2_572) __extension__ ({ \ + int32x4_t __ret_572; \ + int32x4_t __s0_572 = __p0_572; \ + int32x4_t __s1_572 = __p1_572; \ + int32x4_t __rev0_572; __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \ + int32x4_t __rev1_572; __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \ + __ret_572 = __rev0_572 * __noswap_splatq_laneq_s32(__rev1_572, __p2_572); \ + __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \ + __ret_572; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_laneq_s16(__p0_573, __p1_573, __p2_573) __extension__ ({ \ + int16x8_t __ret_573; \ + int16x8_t __s0_573 = __p0_573; \ + int16x8_t __s1_573 = __p1_573; \ + __ret_573 = __s0_573 * splatq_laneq_s16(__s1_573, __p2_573); \ + __ret_573; \ +}) +#else +#define vmulq_laneq_s16(__p0_574, __p1_574, __p2_574) __extension__ ({ \ + int16x8_t __ret_574; \ + int16x8_t __s0_574 = __p0_574; \ + int16x8_t __s1_574 = __p1_574; \ + int16x8_t __rev0_574; __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_574; __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_574 = __rev0_574 * __noswap_splatq_laneq_s16(__rev1_574, __p2_574); \ + __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_574; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u32(__p0_575, __p1_575, __p2_575) __extension__ ({ \ + uint32x2_t __ret_575; \ + uint32x2_t __s0_575 = __p0_575; \ + uint32x4_t __s1_575 = __p1_575; \ + __ret_575 = __s0_575 * splat_laneq_u32(__s1_575, __p2_575); \ + __ret_575; \ +}) +#else +#define vmul_laneq_u32(__p0_576, __p1_576, __p2_576) __extension__ ({ \ + uint32x2_t __ret_576; \ + uint32x2_t __s0_576 = __p0_576; \ + uint32x4_t __s1_576 = __p1_576; \ + uint32x2_t __rev0_576; __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \ + uint32x4_t __rev1_576; __rev1_576 = __builtin_shufflevector(__s1_576, __s1_576, 3, 2, 1, 0); \ + __ret_576 = __rev0_576 * __noswap_splat_laneq_u32(__rev1_576, __p2_576); \ + __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \ + __ret_576; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_u16(__p0_577, __p1_577, __p2_577) __extension__ ({ \ + uint16x4_t __ret_577; \ + uint16x4_t __s0_577 = __p0_577; \ + uint16x8_t __s1_577 = __p1_577; \ + __ret_577 = __s0_577 * splat_laneq_u16(__s1_577, __p2_577); \ + __ret_577; \ +}) +#else +#define vmul_laneq_u16(__p0_578, __p1_578, __p2_578) __extension__ ({ \ + uint16x4_t __ret_578; \ + uint16x4_t __s0_578 = __p0_578; \ + uint16x8_t __s1_578 = __p1_578; \ + uint16x4_t __rev0_578; __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \ + uint16x8_t __rev1_578; __rev1_578 = 
__builtin_shufflevector(__s1_578, __s1_578, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_578 = __rev0_578 * __noswap_splat_laneq_u16(__rev1_578, __p2_578); \ + __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \ + __ret_578; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f32(__p0_579, __p1_579, __p2_579) __extension__ ({ \ + float32x2_t __ret_579; \ + float32x2_t __s0_579 = __p0_579; \ + float32x4_t __s1_579 = __p1_579; \ + __ret_579 = __s0_579 * splat_laneq_f32(__s1_579, __p2_579); \ + __ret_579; \ +}) +#else +#define vmul_laneq_f32(__p0_580, __p1_580, __p2_580) __extension__ ({ \ + float32x2_t __ret_580; \ + float32x2_t __s0_580 = __p0_580; \ + float32x4_t __s1_580 = __p1_580; \ + float32x2_t __rev0_580; __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \ + float32x4_t __rev1_580; __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \ + __ret_580 = __rev0_580 * __noswap_splat_laneq_f32(__rev1_580, __p2_580); \ + __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \ + __ret_580; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s32(__p0_581, __p1_581, __p2_581) __extension__ ({ \ + int32x2_t __ret_581; \ + int32x2_t __s0_581 = __p0_581; \ + int32x4_t __s1_581 = __p1_581; \ + __ret_581 = __s0_581 * splat_laneq_s32(__s1_581, __p2_581); \ + __ret_581; \ +}) +#else +#define vmul_laneq_s32(__p0_582, __p1_582, __p2_582) __extension__ ({ \ + int32x2_t __ret_582; \ + int32x2_t __s0_582 = __p0_582; \ + int32x4_t __s1_582 = __p1_582; \ + int32x2_t __rev0_582; __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 1, 0); \ + int32x4_t __rev1_582; __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 3, 2, 1, 0); \ + __ret_582 = __rev0_582 * __noswap_splat_laneq_s32(__rev1_582, __p2_582); \ + __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 1, 0); \ + __ret_582; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_s16(__p0_583, __p1_583, __p2_583) __extension__ ({ \ + int16x4_t __ret_583; \ + int16x4_t __s0_583 = __p0_583; \ + int16x8_t __s1_583 = __p1_583; \ + __ret_583 = __s0_583 * splat_laneq_s16(__s1_583, __p2_583); \ + __ret_583; \ +}) +#else +#define vmul_laneq_s16(__p0_584, __p1_584, __p2_584) __extension__ ({ \ + int16x4_t __ret_584; \ + int16x4_t __s0_584 = __p0_584; \ + int16x8_t __s1_584 = __p1_584; \ + int16x4_t __rev0_584; __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 3, 2, 1, 0); \ + int16x8_t __rev1_584; __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_584 = __rev0_584 * __noswap_splat_laneq_s16(__rev1_584, __p2_584); \ + __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 3, 2, 1, 0); \ + __ret_584; \ +}) +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __ret; + __ret = __p0 * (float64x2_t) {__p1, __p1}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (float64x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) 
poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly16x8_t __ret; + __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly16x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
__noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_u32(__p0_585, __p1_585, __p2_585) __extension__ ({ \ + uint64x2_t __ret_585; \ + uint32x4_t __s0_585 = __p0_585; \ + uint32x2_t __s1_585 = __p1_585; \ + __ret_585 = vmull_u32(vget_high_u32(__s0_585), splat_lane_u32(__s1_585, __p2_585)); \ + __ret_585; \ +}) +#else +#define vmull_high_lane_u32(__p0_586, __p1_586, __p2_586) __extension__ ({ \ + uint64x2_t __ret_586; \ + uint32x4_t __s0_586 = __p0_586; \ + uint32x2_t __s1_586 = __p1_586; \ + uint32x4_t __rev0_586; __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \ + uint32x2_t __rev1_586; __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \ + __ret_586 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_586), __noswap_splat_lane_u32(__rev1_586, __p2_586)); \ + __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 1, 0); \ + __ret_586; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_u16(__p0_587, __p1_587, __p2_587) __extension__ ({ \ + uint32x4_t __ret_587; \ + uint16x8_t __s0_587 = __p0_587; \ + uint16x4_t __s1_587 = __p1_587; \ + __ret_587 = vmull_u16(vget_high_u16(__s0_587), splat_lane_u16(__s1_587, __p2_587)); \ + __ret_587; \ +}) +#else +#define vmull_high_lane_u16(__p0_588, __p1_588, __p2_588) __extension__ ({ \ + uint32x4_t __ret_588; \ + uint16x8_t __s0_588 = __p0_588; \ + uint16x4_t __s1_588 = __p1_588; \ + uint16x8_t __rev0_588; __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_588; __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \ + __ret_588 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_588), __noswap_splat_lane_u16(__rev1_588, __p2_588)); \ + __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 3, 2, 1, 0); \ + __ret_588; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_s32(__p0_589, __p1_589, __p2_589) __extension__ ({ \ + int64x2_t __ret_589; \ + int32x4_t __s0_589 = __p0_589; 
\ + int32x2_t __s1_589 = __p1_589; \ + __ret_589 = vmull_s32(vget_high_s32(__s0_589), splat_lane_s32(__s1_589, __p2_589)); \ + __ret_589; \ +}) +#else +#define vmull_high_lane_s32(__p0_590, __p1_590, __p2_590) __extension__ ({ \ + int64x2_t __ret_590; \ + int32x4_t __s0_590 = __p0_590; \ + int32x2_t __s1_590 = __p1_590; \ + int32x4_t __rev0_590; __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \ + int32x2_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \ + __ret_590 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_590), __noswap_splat_lane_s32(__rev1_590, __p2_590)); \ + __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 1, 0); \ + __ret_590; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_lane_s16(__p0_591, __p1_591, __p2_591) __extension__ ({ \ + int32x4_t __ret_591; \ + int16x8_t __s0_591 = __p0_591; \ + int16x4_t __s1_591 = __p1_591; \ + __ret_591 = vmull_s16(vget_high_s16(__s0_591), splat_lane_s16(__s1_591, __p2_591)); \ + __ret_591; \ +}) +#else +#define vmull_high_lane_s16(__p0_592, __p1_592, __p2_592) __extension__ ({ \ + int32x4_t __ret_592; \ + int16x8_t __s0_592 = __p0_592; \ + int16x4_t __s1_592 = __p1_592; \ + int16x8_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_592; __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \ + __ret_592 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_592), __noswap_splat_lane_s16(__rev1_592, __p2_592)); \ + __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 3, 2, 1, 0); \ + __ret_592; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u32(__p0_593, __p1_593, __p2_593) __extension__ ({ \ + uint64x2_t __ret_593; \ + uint32x4_t __s0_593 = __p0_593; \ + uint32x4_t __s1_593 = __p1_593; \ + __ret_593 = vmull_u32(vget_high_u32(__s0_593), splat_laneq_u32(__s1_593, __p2_593)); \ + __ret_593; \ +}) +#else +#define vmull_high_laneq_u32(__p0_594, __p1_594, __p2_594) __extension__ ({ \ + uint64x2_t __ret_594; \ + uint32x4_t __s0_594 = __p0_594; \ + uint32x4_t __s1_594 = __p1_594; \ + uint32x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ + uint32x4_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 3, 2, 1, 0); \ + __ret_594 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_594), __noswap_splat_laneq_u32(__rev1_594, __p2_594)); \ + __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 1, 0); \ + __ret_594; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u16(__p0_595, __p1_595, __p2_595) __extension__ ({ \ + uint32x4_t __ret_595; \ + uint16x8_t __s0_595 = __p0_595; \ + uint16x8_t __s1_595 = __p1_595; \ + __ret_595 = vmull_u16(vget_high_u16(__s0_595), splat_laneq_u16(__s1_595, __p2_595)); \ + __ret_595; \ +}) +#else +#define vmull_high_laneq_u16(__p0_596, __p1_596, __p2_596) __extension__ ({ \ + uint32x4_t __ret_596; \ + uint16x8_t __s0_596 = __p0_596; \ + uint16x8_t __s1_596 = __p1_596; \ + uint16x8_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_596 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_596), __noswap_splat_laneq_u16(__rev1_596, __p2_596)); \ + __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 3, 2, 1, 0); \ + __ret_596; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s32(__p0_597, 
__p1_597, __p2_597) __extension__ ({ \ + int64x2_t __ret_597; \ + int32x4_t __s0_597 = __p0_597; \ + int32x4_t __s1_597 = __p1_597; \ + __ret_597 = vmull_s32(vget_high_s32(__s0_597), splat_laneq_s32(__s1_597, __p2_597)); \ + __ret_597; \ +}) +#else +#define vmull_high_laneq_s32(__p0_598, __p1_598, __p2_598) __extension__ ({ \ + int64x2_t __ret_598; \ + int32x4_t __s0_598 = __p0_598; \ + int32x4_t __s1_598 = __p1_598; \ + int32x4_t __rev0_598; __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \ + int32x4_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \ + __ret_598 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_598), __noswap_splat_laneq_s32(__rev1_598, __p2_598)); \ + __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 1, 0); \ + __ret_598; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s16(__p0_599, __p1_599, __p2_599) __extension__ ({ \ + int32x4_t __ret_599; \ + int16x8_t __s0_599 = __p0_599; \ + int16x8_t __s1_599 = __p1_599; \ + __ret_599 = vmull_s16(vget_high_s16(__s0_599), splat_laneq_s16(__s1_599, __p2_599)); \ + __ret_599; \ +}) +#else +#define vmull_high_laneq_s16(__p0_600, __p1_600, __p2_600) __extension__ ({ \ + int32x4_t __ret_600; \ + int16x8_t __s0_600 = __p0_600; \ + int16x8_t __s1_600 = __p1_600; \ + int16x8_t __rev0_600; __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_600 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_600), __noswap_splat_laneq_s16(__rev1_600, __p2_600)); \ + __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 3, 2, 1, 0); \ + __ret_600; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = vmull_n_u32(vget_high_u32(__p0), __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = vmull_n_u16(vget_high_u16(__p0), __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vmull_n_s32(vget_high_s32(__p0), __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vmull_n_s16(vget_high_s16(__p0), __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_u32(__p0_601, __p1_601, __p2_601) __extension__ ({ \ + uint64x2_t __ret_601; \ + uint32x2_t __s0_601 = __p0_601; \ + uint32x4_t __s1_601 = __p1_601; \ + __ret_601 = vmull_u32(__s0_601, splat_laneq_u32(__s1_601, __p2_601)); \ + __ret_601; \ +}) +#else +#define vmull_laneq_u32(__p0_602, __p1_602, __p2_602) __extension__ ({ \ + uint64x2_t __ret_602; \ + uint32x2_t __s0_602 = __p0_602; \ + uint32x4_t __s1_602 = __p1_602; \ + uint32x2_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \ + uint32x4_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \ + __ret_602 = __noswap_vmull_u32(__rev0_602, __noswap_splat_laneq_u32(__rev1_602, __p2_602)); \ + __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 1, 0); \ + __ret_602; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_u16(__p0_603, __p1_603, __p2_603) __extension__ ({ \ + uint32x4_t __ret_603; \ + uint16x4_t __s0_603 = __p0_603; \ + uint16x8_t __s1_603 = __p1_603; \ + __ret_603 = vmull_u16(__s0_603, splat_laneq_u16(__s1_603, __p2_603)); \ + __ret_603; \ +}) +#else +#define vmull_laneq_u16(__p0_604, __p1_604, __p2_604) __extension__ ({ \ + uint32x4_t __ret_604; \ + uint16x4_t __s0_604 = __p0_604; \ + uint16x8_t __s1_604 = __p1_604; \ + uint16x4_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 3, 2, 1, 0); \ + uint16x8_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_604 = __noswap_vmull_u16(__rev0_604, __noswap_splat_laneq_u16(__rev1_604, __p2_604)); \ + __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 3, 2, 1, 0); \ + __ret_604; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_s32(__p0_605, __p1_605, __p2_605) __extension__ ({ \ + int64x2_t __ret_605; \ + int32x2_t __s0_605 = __p0_605; \ + int32x4_t __s1_605 = __p1_605; \ + __ret_605 = vmull_s32(__s0_605, splat_laneq_s32(__s1_605, __p2_605)); \ + __ret_605; \ +}) +#else +#define vmull_laneq_s32(__p0_606, __p1_606, __p2_606) __extension__ ({ \ + int64x2_t __ret_606; \ + int32x2_t __s0_606 = __p0_606; \ + int32x4_t __s1_606 = __p1_606; \ + int32x2_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 1, 0); \ + int32x4_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 3, 2, 1, 0); \ + __ret_606 = __noswap_vmull_s32(__rev0_606, __noswap_splat_laneq_s32(__rev1_606, __p2_606)); \ + __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 1, 0); \ + __ret_606; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_laneq_s16(__p0_607, __p1_607, __p2_607) __extension__ ({ \ + int32x4_t __ret_607; \ + int16x4_t __s0_607 = __p0_607; \ + int16x8_t __s1_607 = __p1_607; \ + __ret_607 = vmull_s16(__s0_607, splat_laneq_s16(__s1_607, __p2_607)); \ + __ret_607; \ +}) +#else +#define vmull_laneq_s16(__p0_608, __p1_608, __p2_608) __extension__ ({ \ + int32x4_t __ret_608; \ + int16x4_t __s0_608 = __p0_608; \ + 
int16x8_t __s1_608 = __p1_608; \ + int16x4_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 3, 2, 1, 0); \ + int16x8_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_608 = __noswap_vmull_s16(__rev0_608, __noswap_splat_laneq_s16(__rev1_608, __p2_608)); \ + __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 3, 2, 1, 0); \ + __ret_608; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64_t vmulxd_f64(float64_t __p0, 
float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); + return __ret; +} +#define vmulxd_lane_f64(__p0_609, __p1_609, __p2_609) __extension__ ({ \ + float64_t __ret_609; \ + float64_t __s0_609 = __p0_609; \ + float64x1_t __s1_609 = __p1_609; \ + __ret_609 = vmulxd_f64(__s0_609, vget_lane_f64(__s1_609, __p2_609)); \ + __ret_609; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmulxs_lane_f32(__p0_610, __p1_610, __p2_610) __extension__ ({ \ + float32_t __ret_610; \ + float32_t __s0_610 = __p0_610; \ + float32x2_t __s1_610 = __p1_610; \ + __ret_610 = vmulxs_f32(__s0_610, vget_lane_f32(__s1_610, __p2_610)); \ + __ret_610; \ +}) +#else +#define vmulxs_lane_f32(__p0_611, __p1_611, __p2_611) __extension__ ({ \ + float32_t __ret_611; \ + float32_t __s0_611 = __p0_611; \ + float32x2_t __s1_611 = __p1_611; \ + float32x2_t __rev1_611; __rev1_611 = __builtin_shufflevector(__s1_611, __s1_611, 1, 0); \ + __ret_611 = vmulxs_f32(__s0_611, __noswap_vget_lane_f32(__rev1_611, __p2_611)); \ + __ret_611; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f64(__p0_612, __p1_612, __p2_612) __extension__ ({ \ + float64x2_t __ret_612; \ + float64x2_t __s0_612 = __p0_612; \ + float64x1_t __s1_612 = __p1_612; \ + __ret_612 = vmulxq_f64(__s0_612, splatq_lane_f64(__s1_612, __p2_612)); \ + __ret_612; \ +}) +#else +#define vmulxq_lane_f64(__p0_613, __p1_613, __p2_613) __extension__ ({ \ + float64x2_t __ret_613; \ + float64x2_t __s0_613 = __p0_613; \ + float64x1_t __s1_613 = __p1_613; \ + float64x2_t __rev0_613; __rev0_613 = __builtin_shufflevector(__s0_613, __s0_613, 1, 0); \ + __ret_613 = __noswap_vmulxq_f64(__rev0_613, __noswap_splatq_lane_f64(__s1_613, __p2_613)); \ + __ret_613 = __builtin_shufflevector(__ret_613, __ret_613, 1, 0); \ + __ret_613; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_lane_f32(__p0_614, __p1_614, __p2_614) __extension__ ({ \ + float32x4_t __ret_614; \ + float32x4_t __s0_614 = __p0_614; \ + float32x2_t __s1_614 = __p1_614; \ + __ret_614 = vmulxq_f32(__s0_614, splatq_lane_f32(__s1_614, __p2_614)); \ + __ret_614; \ +}) +#else +#define vmulxq_lane_f32(__p0_615, __p1_615, __p2_615) __extension__ ({ \ + float32x4_t __ret_615; \ + float32x4_t __s0_615 = __p0_615; \ + float32x2_t __s1_615 = __p1_615; \ + float32x4_t __rev0_615; __rev0_615 = __builtin_shufflevector(__s0_615, __s0_615, 3, 2, 1, 0); \ + float32x2_t __rev1_615; __rev1_615 = __builtin_shufflevector(__s1_615, __s1_615, 1, 0); \ + __ret_615 = __noswap_vmulxq_f32(__rev0_615, __noswap_splatq_lane_f32(__rev1_615, __p2_615)); \ + __ret_615 = __builtin_shufflevector(__ret_615, __ret_615, 3, 2, 1, 0); \ + __ret_615; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_lane_f32(__p0_616, __p1_616, __p2_616) __extension__ ({ \ + float32x2_t __ret_616; \ + float32x2_t __s0_616 = __p0_616; \ + float32x2_t __s1_616 = __p1_616; \ + __ret_616 = vmulx_f32(__s0_616, splat_lane_f32(__s1_616, __p2_616)); \ + __ret_616; \ +}) +#else +#define vmulx_lane_f32(__p0_617, __p1_617, __p2_617) __extension__ ({ \ + float32x2_t __ret_617; \ + float32x2_t __s0_617 = __p0_617; \ + float32x2_t __s1_617 = __p1_617; \ + float32x2_t __rev0_617; __rev0_617 = __builtin_shufflevector(__s0_617, __s0_617, 1, 0); \ + float32x2_t __rev1_617; __rev1_617 = __builtin_shufflevector(__s1_617, __s1_617, 1, 0); \ + __ret_617 = 
__noswap_vmulx_f32(__rev0_617, __noswap_splat_lane_f32(__rev1_617, __p2_617)); \ + __ret_617 = __builtin_shufflevector(__ret_617, __ret_617, 1, 0); \ + __ret_617; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxd_laneq_f64(__p0_618, __p1_618, __p2_618) __extension__ ({ \ + float64_t __ret_618; \ + float64_t __s0_618 = __p0_618; \ + float64x2_t __s1_618 = __p1_618; \ + __ret_618 = vmulxd_f64(__s0_618, vgetq_lane_f64(__s1_618, __p2_618)); \ + __ret_618; \ +}) +#else +#define vmulxd_laneq_f64(__p0_619, __p1_619, __p2_619) __extension__ ({ \ + float64_t __ret_619; \ + float64_t __s0_619 = __p0_619; \ + float64x2_t __s1_619 = __p1_619; \ + float64x2_t __rev1_619; __rev1_619 = __builtin_shufflevector(__s1_619, __s1_619, 1, 0); \ + __ret_619 = vmulxd_f64(__s0_619, __noswap_vgetq_lane_f64(__rev1_619, __p2_619)); \ + __ret_619; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxs_laneq_f32(__p0_620, __p1_620, __p2_620) __extension__ ({ \ + float32_t __ret_620; \ + float32_t __s0_620 = __p0_620; \ + float32x4_t __s1_620 = __p1_620; \ + __ret_620 = vmulxs_f32(__s0_620, vgetq_lane_f32(__s1_620, __p2_620)); \ + __ret_620; \ +}) +#else +#define vmulxs_laneq_f32(__p0_621, __p1_621, __p2_621) __extension__ ({ \ + float32_t __ret_621; \ + float32_t __s0_621 = __p0_621; \ + float32x4_t __s1_621 = __p1_621; \ + float32x4_t __rev1_621; __rev1_621 = __builtin_shufflevector(__s1_621, __s1_621, 3, 2, 1, 0); \ + __ret_621 = vmulxs_f32(__s0_621, __noswap_vgetq_lane_f32(__rev1_621, __p2_621)); \ + __ret_621; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f64(__p0_622, __p1_622, __p2_622) __extension__ ({ \ + float64x2_t __ret_622; \ + float64x2_t __s0_622 = __p0_622; \ + float64x2_t __s1_622 = __p1_622; \ + __ret_622 = vmulxq_f64(__s0_622, splatq_laneq_f64(__s1_622, __p2_622)); \ + __ret_622; \ +}) +#else +#define vmulxq_laneq_f64(__p0_623, __p1_623, __p2_623) __extension__ ({ \ + float64x2_t __ret_623; \ + float64x2_t __s0_623 = __p0_623; \ + float64x2_t __s1_623 = __p1_623; \ + float64x2_t __rev0_623; __rev0_623 = __builtin_shufflevector(__s0_623, __s0_623, 1, 0); \ + float64x2_t __rev1_623; __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \ + __ret_623 = __noswap_vmulxq_f64(__rev0_623, __noswap_splatq_laneq_f64(__rev1_623, __p2_623)); \ + __ret_623 = __builtin_shufflevector(__ret_623, __ret_623, 1, 0); \ + __ret_623; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulxq_laneq_f32(__p0_624, __p1_624, __p2_624) __extension__ ({ \ + float32x4_t __ret_624; \ + float32x4_t __s0_624 = __p0_624; \ + float32x4_t __s1_624 = __p1_624; \ + __ret_624 = vmulxq_f32(__s0_624, splatq_laneq_f32(__s1_624, __p2_624)); \ + __ret_624; \ +}) +#else +#define vmulxq_laneq_f32(__p0_625, __p1_625, __p2_625) __extension__ ({ \ + float32x4_t __ret_625; \ + float32x4_t __s0_625 = __p0_625; \ + float32x4_t __s1_625 = __p1_625; \ + float32x4_t __rev0_625; __rev0_625 = __builtin_shufflevector(__s0_625, __s0_625, 3, 2, 1, 0); \ + float32x4_t __rev1_625; __rev1_625 = __builtin_shufflevector(__s1_625, __s1_625, 3, 2, 1, 0); \ + __ret_625 = __noswap_vmulxq_f32(__rev0_625, __noswap_splatq_laneq_f32(__rev1_625, __p2_625)); \ + __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 3, 2, 1, 0); \ + __ret_625; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f32(__p0_626, __p1_626, __p2_626) __extension__ ({ \ + float32x2_t __ret_626; \ + float32x2_t __s0_626 = __p0_626; \ + float32x4_t __s1_626 = __p1_626; \ + __ret_626 = vmulx_f32(__s0_626, splat_laneq_f32(__s1_626, __p2_626)); \ + 
__ret_626; \ +}) +#else +#define vmulx_laneq_f32(__p0_627, __p1_627, __p2_627) __extension__ ({ \ + float32x2_t __ret_627; \ + float32x2_t __s0_627 = __p0_627; \ + float32x4_t __s1_627 = __p1_627; \ + float32x2_t __rev0_627; __rev0_627 = __builtin_shufflevector(__s0_627, __s0_627, 1, 0); \ + float32x4_t __rev1_627; __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 3, 2, 1, 0); \ + __ret_627 = __noswap_vmulx_f32(__rev0_627, __noswap_splat_laneq_f32(__rev1_627, __p2_627)); \ + __ret_627 = __builtin_shufflevector(__ret_627, __ret_627, 1, 0); \ + __ret_627; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vnegq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vnegq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = -__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vneg_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = -__p0; + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = -__p0; + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vnegd_s64(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + 
uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + 
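The hunks here add the AArch64 pairwise intrinsics (vpaddq_*, and further down vpmaxq_*/vpminq_*, their *nm* forms, and the scalar reductions such as vpaddd_f64); each intrinsic gets a little-endian body that calls the __builtin_neon_* builtin directly and a big-endian body that reverses lane order with __builtin_shufflevector before and after the call, so lane semantics match on either byte order. A minimal usage sketch of the pairwise-add form, assuming only a standard AArch64 <arm_neon.h> (illustrative only, not part of the vendored header):

    #include <arm_neon.h>

    /* Horizontal sum of a float32x4_t using vpaddq_f32 as defined above:
       two pairwise folds reduce the four lanes to a single value. */
    static float horizontal_sum_f32x4(float32x4_t v) {
      float32x4_t t = vpaddq_f32(v, v);   /* {a+b, c+d, a+b, c+d} */
      t = vpaddq_f32(t, t);               /* {a+b+c+d, ...}        */
      return vgetq_lane_f32(t, 0);        /* extract lane 0        */
    }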
+#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64_t vpaddd_u64(uint64x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64_t vpaddd_u64(uint64x2_t __p0) { + uint64_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vpaddd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vpaddd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64_t vpaddd_s64(int64x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64_t vpaddd_s64(int64x2_t __p0) { + int64_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t 
vpadds_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpadds_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vpadds_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vpmaxqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vpmaxqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vpmaxs_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t 
vpmaxs_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vpmaxnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vpmaxnms_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vpminqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vpminqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vpmins_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmins_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vpmins_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vpminnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vpminnmqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vpminnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vpminnms_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vqabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vqabsb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqabss_s32(int32_t __p0) { + 
int32_t __ret; + __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqabsh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqadds_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 
1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_lane_s32(__p0_628, __p1_628, __p2_628, __p3_628) __extension__ ({ \ + int64x2_t __ret_628; \ + int64x2_t __s0_628 = __p0_628; \ + int32x4_t __s1_628 = __p1_628; \ + int32x2_t __s2_628 = __p2_628; \ + __ret_628 = vqdmlal_s32(__s0_628, vget_high_s32(__s1_628), splat_lane_s32(__s2_628, __p3_628)); \ + __ret_628; \ +}) +#else +#define vqdmlal_high_lane_s32(__p0_629, __p1_629, __p2_629, __p3_629) __extension__ ({ \ + int64x2_t __ret_629; \ + int64x2_t __s0_629 = __p0_629; \ + int32x4_t __s1_629 = __p1_629; \ + int32x2_t __s2_629 = __p2_629; \ + int64x2_t __rev0_629; __rev0_629 = __builtin_shufflevector(__s0_629, __s0_629, 1, 0); \ + int32x4_t __rev1_629; __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \ + int32x2_t __rev2_629; __rev2_629 = __builtin_shufflevector(__s2_629, __s2_629, 1, 0); \ + __ret_629 = __noswap_vqdmlal_s32(__rev0_629, __noswap_vget_high_s32(__rev1_629), __noswap_splat_lane_s32(__rev2_629, __p3_629)); \ + __ret_629 = __builtin_shufflevector(__ret_629, __ret_629, 1, 0); \ + __ret_629; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_lane_s16(__p0_630, __p1_630, __p2_630, __p3_630) __extension__ ({ \ + int32x4_t __ret_630; \ + int32x4_t __s0_630 = __p0_630; \ + int16x8_t __s1_630 = __p1_630; \ + int16x4_t __s2_630 = __p2_630; \ + __ret_630 = vqdmlal_s16(__s0_630, vget_high_s16(__s1_630), splat_lane_s16(__s2_630, __p3_630)); \ + __ret_630; \ +}) +#else +#define vqdmlal_high_lane_s16(__p0_631, __p1_631, __p2_631, __p3_631) __extension__ ({ \ + int32x4_t __ret_631; \ + int32x4_t __s0_631 = __p0_631; \ + int16x8_t __s1_631 = __p1_631; \ + int16x4_t __s2_631 = __p2_631; \ + int32x4_t __rev0_631; __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \ + int16x8_t __rev1_631; __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_631; __rev2_631 = __builtin_shufflevector(__s2_631, __s2_631, 3, 2, 1, 0); \ + __ret_631 = __noswap_vqdmlal_s16(__rev0_631, __noswap_vget_high_s16(__rev1_631), __noswap_splat_lane_s16(__rev2_631, __p3_631)); \ + __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \ + __ret_631; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_laneq_s32(__p0_632, __p1_632, __p2_632, __p3_632) __extension__ ({ \ + int64x2_t __ret_632; \ + int64x2_t __s0_632 = __p0_632; \ + int32x4_t __s1_632 = __p1_632; \ + int32x4_t __s2_632 = __p2_632; \ + __ret_632 = vqdmlal_s32(__s0_632, vget_high_s32(__s1_632), splat_laneq_s32(__s2_632, __p3_632)); \ + __ret_632; \ +}) +#else +#define vqdmlal_high_laneq_s32(__p0_633, __p1_633, __p2_633, __p3_633) __extension__ ({ \ + int64x2_t __ret_633; \ + int64x2_t __s0_633 = __p0_633; \ + int32x4_t __s1_633 = __p1_633; \ + int32x4_t __s2_633 = __p2_633; \ + int64x2_t __rev0_633; __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 1, 0); \ + int32x4_t __rev1_633; __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 3, 2, 1, 0); \ + int32x4_t __rev2_633; __rev2_633 = __builtin_shufflevector(__s2_633, __s2_633, 3, 2, 1, 0); \ + __ret_633 = __noswap_vqdmlal_s32(__rev0_633, 
__noswap_vget_high_s32(__rev1_633), __noswap_splat_laneq_s32(__rev2_633, __p3_633)); \ + __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 1, 0); \ + __ret_633; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_high_laneq_s16(__p0_634, __p1_634, __p2_634, __p3_634) __extension__ ({ \ + int32x4_t __ret_634; \ + int32x4_t __s0_634 = __p0_634; \ + int16x8_t __s1_634 = __p1_634; \ + int16x8_t __s2_634 = __p2_634; \ + __ret_634 = vqdmlal_s16(__s0_634, vget_high_s16(__s1_634), splat_laneq_s16(__s2_634, __p3_634)); \ + __ret_634; \ +}) +#else +#define vqdmlal_high_laneq_s16(__p0_635, __p1_635, __p2_635, __p3_635) __extension__ ({ \ + int32x4_t __ret_635; \ + int32x4_t __s0_635 = __p0_635; \ + int16x8_t __s1_635 = __p1_635; \ + int16x8_t __s2_635 = __p2_635; \ + int32x4_t __rev0_635; __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 3, 2, 1, 0); \ + int16x8_t __rev1_635; __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_635; __rev2_635 = __builtin_shufflevector(__s2_635, __s2_635, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_635 = __noswap_vqdmlal_s16(__rev0_635, __noswap_vget_high_s16(__rev1_635), __noswap_splat_laneq_s16(__rev2_635, __p3_635)); \ + __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 3, 2, 1, 0); \ + __ret_635; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + 
int16x4_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_laneq_s32(__p0_636, __p1_636, __p2_636, __p3_636) __extension__ ({ \ + int64x2_t __ret_636; \ + int64x2_t __s0_636 = __p0_636; \ + int32x2_t __s1_636 = __p1_636; \ + int32x4_t __s2_636 = __p2_636; \ + __ret_636 = vqdmlal_s32(__s0_636, __s1_636, splat_laneq_s32(__s2_636, __p3_636)); \ + __ret_636; \ +}) +#else +#define vqdmlal_laneq_s32(__p0_637, __p1_637, __p2_637, __p3_637) __extension__ ({ \ + int64x2_t __ret_637; \ + int64x2_t __s0_637 = __p0_637; \ + int32x2_t __s1_637 = __p1_637; \ + int32x4_t __s2_637 = __p2_637; \ + int64x2_t __rev0_637; __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 1, 0); \ + int32x2_t __rev1_637; __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 1, 0); \ + int32x4_t __rev2_637; __rev2_637 = __builtin_shufflevector(__s2_637, __s2_637, 3, 2, 1, 0); \ + __ret_637 = __noswap_vqdmlal_s32(__rev0_637, __rev1_637, __noswap_splat_laneq_s32(__rev2_637, __p3_637)); \ + __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 1, 0); \ + __ret_637; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_laneq_s16(__p0_638, __p1_638, __p2_638, __p3_638) __extension__ ({ \ + int32x4_t __ret_638; \ + int32x4_t __s0_638 = __p0_638; \ + int16x4_t __s1_638 = __p1_638; \ + int16x8_t __s2_638 = __p2_638; \ + __ret_638 = vqdmlal_s16(__s0_638, __s1_638, splat_laneq_s16(__s2_638, __p3_638)); \ + __ret_638; \ +}) +#else +#define vqdmlal_laneq_s16(__p0_639, __p1_639, __p2_639, __p3_639) __extension__ ({ \ + int32x4_t __ret_639; \ + int32x4_t __s0_639 = __p0_639; \ + int16x4_t __s1_639 = __p1_639; \ + int16x8_t __s2_639 = __p2_639; \ + int32x4_t __rev0_639; __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \ + int16x4_t __rev1_639; __rev1_639 = 
__builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \ + int16x8_t __rev2_639; __rev2_639 = __builtin_shufflevector(__s2_639, __s2_639, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_639 = __noswap_vqdmlal_s16(__rev0_639, __rev1_639, __noswap_splat_laneq_s16(__rev2_639, __p3_639)); \ + __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \ + __ret_639; \ +}) +#endif + +__ai __attribute__((target("neon"))) int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_lane_s32(__p0_640, __p1_640, __p2_640, __p3_640) __extension__ ({ \ + int64x2_t __ret_640; \ + int64x2_t __s0_640 = __p0_640; \ + int32x4_t __s1_640 = __p1_640; \ + int32x2_t __s2_640 = __p2_640; \ + __ret_640 = vqdmlsl_s32(__s0_640, vget_high_s32(__s1_640), splat_lane_s32(__s2_640, __p3_640)); \ + __ret_640; \ +}) +#else +#define vqdmlsl_high_lane_s32(__p0_641, __p1_641, __p2_641, __p3_641) __extension__ ({ \ + int64x2_t __ret_641; \ + int64x2_t __s0_641 = __p0_641; \ + int32x4_t __s1_641 = __p1_641; \ + int32x2_t __s2_641 = __p2_641; \ + int64x2_t __rev0_641; __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 1, 0); \ + int32x4_t __rev1_641; __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 3, 2, 1, 0); \ + int32x2_t __rev2_641; __rev2_641 = __builtin_shufflevector(__s2_641, __s2_641, 1, 0); \ + __ret_641 = __noswap_vqdmlsl_s32(__rev0_641, __noswap_vget_high_s32(__rev1_641), __noswap_splat_lane_s32(__rev2_641, __p3_641)); \ + __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 1, 0); \ + __ret_641; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vqdmlsl_high_lane_s16(__p0_642, __p1_642, __p2_642, __p3_642) __extension__ ({ \ + int32x4_t __ret_642; \ + int32x4_t __s0_642 = __p0_642; \ + int16x8_t __s1_642 = __p1_642; \ + int16x4_t __s2_642 = __p2_642; \ + __ret_642 = vqdmlsl_s16(__s0_642, vget_high_s16(__s1_642), splat_lane_s16(__s2_642, __p3_642)); \ + __ret_642; \ +}) +#else +#define vqdmlsl_high_lane_s16(__p0_643, __p1_643, __p2_643, __p3_643) __extension__ ({ \ + int32x4_t __ret_643; \ + int32x4_t __s0_643 = __p0_643; \ + int16x8_t __s1_643 = __p1_643; \ + int16x4_t __s2_643 = __p2_643; \ + int32x4_t __rev0_643; __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 3, 2, 1, 0); \ + int16x8_t __rev1_643; __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_643; __rev2_643 = __builtin_shufflevector(__s2_643, __s2_643, 3, 2, 1, 0); \ + __ret_643 = __noswap_vqdmlsl_s16(__rev0_643, __noswap_vget_high_s16(__rev1_643), __noswap_splat_lane_s16(__rev2_643, __p3_643)); \ + __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 3, 2, 1, 0); \ + __ret_643; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s32(__p0_644, __p1_644, __p2_644, __p3_644) __extension__ ({ \ + int64x2_t __ret_644; \ + int64x2_t __s0_644 = __p0_644; \ + int32x4_t __s1_644 = __p1_644; \ + int32x4_t __s2_644 = __p2_644; \ + __ret_644 = vqdmlsl_s32(__s0_644, vget_high_s32(__s1_644), splat_laneq_s32(__s2_644, __p3_644)); \ + __ret_644; \ +}) +#else +#define vqdmlsl_high_laneq_s32(__p0_645, __p1_645, __p2_645, __p3_645) __extension__ ({ \ + int64x2_t __ret_645; \ + int64x2_t __s0_645 = __p0_645; \ + int32x4_t __s1_645 = __p1_645; \ + int32x4_t __s2_645 = __p2_645; \ + int64x2_t __rev0_645; __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 1, 0); \ + int32x4_t __rev1_645; __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 3, 2, 1, 0); \ + int32x4_t __rev2_645; __rev2_645 = __builtin_shufflevector(__s2_645, __s2_645, 3, 2, 1, 0); \ + __ret_645 = __noswap_vqdmlsl_s32(__rev0_645, __noswap_vget_high_s32(__rev1_645), __noswap_splat_laneq_s32(__rev2_645, __p3_645)); \ + __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 1, 0); \ + __ret_645; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s16(__p0_646, __p1_646, __p2_646, __p3_646) __extension__ ({ \ + int32x4_t __ret_646; \ + int32x4_t __s0_646 = __p0_646; \ + int16x8_t __s1_646 = __p1_646; \ + int16x8_t __s2_646 = __p2_646; \ + __ret_646 = vqdmlsl_s16(__s0_646, vget_high_s16(__s1_646), splat_laneq_s16(__s2_646, __p3_646)); \ + __ret_646; \ +}) +#else +#define vqdmlsl_high_laneq_s16(__p0_647, __p1_647, __p2_647, __p3_647) __extension__ ({ \ + int32x4_t __ret_647; \ + int32x4_t __s0_647 = __p0_647; \ + int16x8_t __s1_647 = __p1_647; \ + int16x8_t __s2_647 = __p2_647; \ + int32x4_t __rev0_647; __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 3, 2, 1, 0); \ + int16x8_t __rev1_647; __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_647; __rev2_647 = __builtin_shufflevector(__s2_647, __s2_647, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_647 = __noswap_vqdmlsl_s16(__rev0_647, __noswap_vget_high_s16(__rev1_647), __noswap_splat_laneq_s16(__rev2_647, __p3_647)); \ + __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 3, 2, 1, 0); \ + __ret_647; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = 
vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \ + __ret; \ +}) +#else +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t 
__ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_laneq_s32(__p0_648, __p1_648, __p2_648, __p3_648) __extension__ ({ \ + int64x2_t __ret_648; \ + int64x2_t __s0_648 = __p0_648; \ + int32x2_t __s1_648 = __p1_648; \ + int32x4_t __s2_648 = __p2_648; \ + __ret_648 = vqdmlsl_s32(__s0_648, __s1_648, splat_laneq_s32(__s2_648, __p3_648)); \ + __ret_648; \ +}) +#else +#define vqdmlsl_laneq_s32(__p0_649, __p1_649, __p2_649, __p3_649) __extension__ ({ \ + int64x2_t __ret_649; \ + int64x2_t __s0_649 = __p0_649; \ + int32x2_t __s1_649 = __p1_649; \ + int32x4_t __s2_649 = __p2_649; \ + int64x2_t __rev0_649; __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \ + int32x2_t __rev1_649; __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 1, 0); \ + int32x4_t __rev2_649; __rev2_649 = __builtin_shufflevector(__s2_649, __s2_649, 3, 2, 1, 0); \ + __ret_649 = __noswap_vqdmlsl_s32(__rev0_649, __rev1_649, __noswap_splat_laneq_s32(__rev2_649, __p3_649)); \ + __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \ + __ret_649; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_laneq_s16(__p0_650, __p1_650, __p2_650, __p3_650) __extension__ ({ \ + int32x4_t __ret_650; \ + int32x4_t __s0_650 = __p0_650; \ + int16x4_t __s1_650 = __p1_650; \ + int16x8_t __s2_650 = __p2_650; \ + __ret_650 = vqdmlsl_s16(__s0_650, __s1_650, splat_laneq_s16(__s2_650, __p3_650)); \ + __ret_650; \ +}) +#else +#define vqdmlsl_laneq_s16(__p0_651, __p1_651, __p2_651, __p3_651) __extension__ ({ \ + int32x4_t __ret_651; \ + int32x4_t __s0_651 = __p0_651; \ + int16x4_t __s1_651 = __p1_651; \ + int16x8_t __s2_651 = __p2_651; \ + int32x4_t __rev0_651; __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \ + int16x4_t __rev1_651; __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 3, 2, 1, 0); \ + int16x8_t __rev2_651; __rev2_651 = __builtin_shufflevector(__s2_651, __s2_651, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_651 = __noswap_vqdmlsl_s16(__rev0_651, __rev1_651, __noswap_splat_laneq_s16(__rev2_651, __p3_651)); \ + __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \ + __ret_651; \ +}) +#endif + +__ai __attribute__((target("neon"))) int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhs_lane_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \ + int32_t __ret_652; \ + int32_t __s0_652 = __p0_652; \ + int32x2_t __s1_652 = __p1_652; \ + __ret_652 = vqdmulhs_s32(__s0_652, vget_lane_s32(__s1_652, __p2_652)); \ + __ret_652; \ +}) +#else +#define vqdmulhs_lane_s32(__p0_653, __p1_653, __p2_653) __extension__ ({ \ + int32_t __ret_653; \ + int32_t __s0_653 = __p0_653; \ + int32x2_t __s1_653 = __p1_653; \ + int32x2_t __rev1_653; __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \ + __ret_653 = vqdmulhs_s32(__s0_653, __noswap_vget_lane_s32(__rev1_653, __p2_653)); \ + __ret_653; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhh_lane_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \ + int16_t __ret_654; \ + int16_t __s0_654 = __p0_654; \ + int16x4_t __s1_654 = __p1_654; \ + __ret_654 = vqdmulhh_s16(__s0_654, vget_lane_s16(__s1_654, __p2_654)); \ + __ret_654; \ +}) +#else +#define vqdmulhh_lane_s16(__p0_655, __p1_655, __p2_655) __extension__ ({ \ + int16_t __ret_655; \ + int16_t __s0_655 = __p0_655; \ + int16x4_t __s1_655 = __p1_655; \ + int16x4_t __rev1_655; __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \ + 
__ret_655 = vqdmulhh_s16(__s0_655, __noswap_vget_lane_s16(__rev1_655, __p2_655)); \ + __ret_655; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhs_laneq_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \ + int32_t __ret_656; \ + int32_t __s0_656 = __p0_656; \ + int32x4_t __s1_656 = __p1_656; \ + __ret_656 = vqdmulhs_s32(__s0_656, vgetq_lane_s32(__s1_656, __p2_656)); \ + __ret_656; \ +}) +#else +#define vqdmulhs_laneq_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \ + int32_t __ret_657; \ + int32_t __s0_657 = __p0_657; \ + int32x4_t __s1_657 = __p1_657; \ + int32x4_t __rev1_657; __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 3, 2, 1, 0); \ + __ret_657 = vqdmulhs_s32(__s0_657, __noswap_vgetq_lane_s32(__rev1_657, __p2_657)); \ + __ret_657; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhh_laneq_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \ + int16_t __ret_658; \ + int16_t __s0_658 = __p0_658; \ + int16x8_t __s1_658 = __p1_658; \ + __ret_658 = vqdmulhh_s16(__s0_658, vgetq_lane_s16(__s1_658, __p2_658)); \ + __ret_658; \ +}) +#else +#define vqdmulhh_laneq_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \ + int16_t __ret_659; \ + int16_t __s0_659 = __p0_659; \ + int16x8_t __s1_659 = __p1_659; \ + int16x8_t __rev1_659; __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_659 = vqdmulhh_s16(__s0_659, __noswap_vgetq_lane_s16(__rev1_659, __p2_659)); \ + __ret_659; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) +#else +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +__ai __attribute__((target("neon"))) int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_lane_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \ + int64x2_t __ret_660; \ + int32x4_t __s0_660 = __p0_660; \ + int32x2_t __s1_660 = __p1_660; \ + __ret_660 = vqdmull_s32(vget_high_s32(__s0_660), splat_lane_s32(__s1_660, __p2_660)); \ + __ret_660; \ +}) +#else +#define vqdmull_high_lane_s32(__p0_661, __p1_661, __p2_661) __extension__ ({ \ + int64x2_t __ret_661; \ + int32x4_t __s0_661 = __p0_661; \ + int32x2_t __s1_661 = __p1_661; \ + int32x4_t __rev0_661; __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \ + int32x2_t __rev1_661; __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 1, 0); \ + 
__ret_661 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_661), __noswap_splat_lane_s32(__rev1_661, __p2_661)); \ + __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \ + __ret_661; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_lane_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \ + int32x4_t __ret_662; \ + int16x8_t __s0_662 = __p0_662; \ + int16x4_t __s1_662 = __p1_662; \ + __ret_662 = vqdmull_s16(vget_high_s16(__s0_662), splat_lane_s16(__s1_662, __p2_662)); \ + __ret_662; \ +}) +#else +#define vqdmull_high_lane_s16(__p0_663, __p1_663, __p2_663) __extension__ ({ \ + int32x4_t __ret_663; \ + int16x8_t __s0_663 = __p0_663; \ + int16x4_t __s1_663 = __p1_663; \ + int16x8_t __rev0_663; __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_663; __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 3, 2, 1, 0); \ + __ret_663 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_663), __noswap_splat_lane_s16(__rev1_663, __p2_663)); \ + __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \ + __ret_663; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ + int64x2_t __ret_664; \ + int32x4_t __s0_664 = __p0_664; \ + int32x4_t __s1_664 = __p1_664; \ + __ret_664 = vqdmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \ + __ret_664; \ +}) +#else +#define vqdmull_high_laneq_s32(__p0_665, __p1_665, __p2_665) __extension__ ({ \ + int64x2_t __ret_665; \ + int32x4_t __s0_665 = __p0_665; \ + int32x4_t __s1_665 = __p1_665; \ + int32x4_t __rev0_665; __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \ + int32x4_t __rev1_665; __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \ + __ret_665 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \ + __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \ + __ret_665; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ + int32x4_t __ret_666; \ + int16x8_t __s0_666 = __p0_666; \ + int16x8_t __s1_666 = __p1_666; \ + __ret_666 = vqdmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \ + __ret_666; \ +}) +#else +#define vqdmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \ + int32x4_t __ret_667; \ + int16x8_t __s0_667 = __p0_667; \ + int16x8_t __s1_667 = __p1_667; \ + int16x8_t __rev0_667; __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_667; __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_667 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_667), __noswap_splat_laneq_s16(__rev1_667, __p2_667)); \ + __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \ + __ret_667; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif 
+ +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulls_lane_s32(__p0_668, __p1_668, __p2_668) __extension__ ({ \ + int64_t __ret_668; \ + int32_t __s0_668 = __p0_668; \ + int32x2_t __s1_668 = __p1_668; \ + __ret_668 = vqdmulls_s32(__s0_668, vget_lane_s32(__s1_668, __p2_668)); \ + __ret_668; \ +}) +#else +#define vqdmulls_lane_s32(__p0_669, __p1_669, __p2_669) __extension__ ({ \ + int64_t __ret_669; \ + int32_t __s0_669 = __p0_669; \ + int32x2_t __s1_669 = __p1_669; \ + int32x2_t __rev1_669; __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 1, 0); \ + __ret_669 = vqdmulls_s32(__s0_669, __noswap_vget_lane_s32(__rev1_669, __p2_669)); \ + __ret_669; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmullh_lane_s16(__p0_670, __p1_670, __p2_670) __extension__ ({ \ + int32_t __ret_670; \ + int16_t __s0_670 = __p0_670; \ + int16x4_t __s1_670 = __p1_670; \ + __ret_670 = vqdmullh_s16(__s0_670, vget_lane_s16(__s1_670, __p2_670)); \ + __ret_670; \ +}) +#else +#define vqdmullh_lane_s16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ + int32_t __ret_671; \ + int16_t __s0_671 = __p0_671; \ + int16x4_t __s1_671 = __p1_671; \ + int16x4_t __rev1_671; __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 3, 2, 1, 0); \ + __ret_671 = vqdmullh_s16(__s0_671, __noswap_vget_lane_s16(__rev1_671, __p2_671)); \ + __ret_671; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulls_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \ + int64_t __ret_672; \ + int32_t __s0_672 = __p0_672; \ + int32x4_t __s1_672 = __p1_672; \ + __ret_672 = vqdmulls_s32(__s0_672, vgetq_lane_s32(__s1_672, __p2_672)); \ + __ret_672; \ +}) +#else +#define vqdmulls_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \ + int64_t __ret_673; \ + int32_t __s0_673 = __p0_673; \ + int32x4_t __s1_673 = __p1_673; \ + int32x4_t __rev1_673; __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \ + __ret_673 = vqdmulls_s32(__s0_673, __noswap_vgetq_lane_s32(__rev1_673, __p2_673)); \ + __ret_673; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmullh_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \ + int32_t __ret_674; \ + int16_t __s0_674 = __p0_674; \ + int16x8_t __s1_674 = __p1_674; \ + __ret_674 = vqdmullh_s16(__s0_674, vgetq_lane_s16(__s1_674, __p2_674)); \ + __ret_674; \ +}) +#else +#define vqdmullh_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \ + int32_t __ret_675; \ + int16_t __s0_675 = __p0_675; \ + int16x8_t __s1_675 = __p1_675; \ + int16x8_t __rev1_675; __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_675 = vqdmullh_s16(__s0_675, __noswap_vgetq_lane_s16(__rev1_675, __p2_675)); \ + __ret_675; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_laneq_s32(__p0_676, __p1_676, __p2_676) __extension__ ({ \ + int64x2_t __ret_676; \ + int32x2_t __s0_676 = __p0_676; \ + int32x4_t __s1_676 = __p1_676; \ + __ret_676 = vqdmull_s32(__s0_676, splat_laneq_s32(__s1_676, 
__p2_676)); \ + __ret_676; \ +}) +#else +#define vqdmull_laneq_s32(__p0_677, __p1_677, __p2_677) __extension__ ({ \ + int64x2_t __ret_677; \ + int32x2_t __s0_677 = __p0_677; \ + int32x4_t __s1_677 = __p1_677; \ + int32x2_t __rev0_677; __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, 1, 0); \ + int32x4_t __rev1_677; __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, 3, 2, 1, 0); \ + __ret_677 = __noswap_vqdmull_s32(__rev0_677, __noswap_splat_laneq_s32(__rev1_677, __p2_677)); \ + __ret_677 = __builtin_shufflevector(__ret_677, __ret_677, 1, 0); \ + __ret_677; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_laneq_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \ + int32x4_t __ret_678; \ + int16x4_t __s0_678 = __p0_678; \ + int16x8_t __s1_678 = __p1_678; \ + __ret_678 = vqdmull_s16(__s0_678, splat_laneq_s16(__s1_678, __p2_678)); \ + __ret_678; \ +}) +#else +#define vqdmull_laneq_s16(__p0_679, __p1_679, __p2_679) __extension__ ({ \ + int32x4_t __ret_679; \ + int16x4_t __s0_679 = __p0_679; \ + int16x8_t __s1_679 = __p1_679; \ + int16x4_t __rev0_679; __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, 3, 2, 1, 0); \ + int16x8_t __rev1_679; __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_679 = __noswap_vqdmull_s16(__rev0_679, __noswap_splat_laneq_s16(__rev1_679, __p2_679)); \ + __ret_679 = __builtin_shufflevector(__ret_679, __ret_679, 3, 2, 1, 0); \ + __ret_679; \ +}) +#endif + +__ai __attribute__((target("neon"))) int16_t vqmovns_s32(int32_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqmovnd_s64(int64_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vqmovnh_s16(int16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16_t vqmovns_u32(uint32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vqmovnd_u64(uint64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8_t vqmovnh_u16(uint16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vqmovn_u32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vqmovn_u64(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vqmovn_u16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vqmovn_s32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vqmovn_s64(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vqmovn_s16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint16_t vqmovuns_s32(int32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vqmovund_s64(int64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8_t vqmovunh_s16(int16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t 
vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vqneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vqnegb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqnegs_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqnegh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) 
__builtin_neon_vqrdmulhs_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhs_lane_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \ + int32_t __ret_680; \ + int32_t __s0_680 = __p0_680; \ + int32x2_t __s1_680 = __p1_680; \ + __ret_680 
= vqrdmulhs_s32(__s0_680, vget_lane_s32(__s1_680, __p2_680)); \ + __ret_680; \ +}) +#else +#define vqrdmulhs_lane_s32(__p0_681, __p1_681, __p2_681) __extension__ ({ \ + int32_t __ret_681; \ + int32_t __s0_681 = __p0_681; \ + int32x2_t __s1_681 = __p1_681; \ + int32x2_t __rev1_681; __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, 1, 0); \ + __ret_681 = vqrdmulhs_s32(__s0_681, __noswap_vget_lane_s32(__rev1_681, __p2_681)); \ + __ret_681; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhh_lane_s16(__p0_682, __p1_682, __p2_682) __extension__ ({ \ + int16_t __ret_682; \ + int16_t __s0_682 = __p0_682; \ + int16x4_t __s1_682 = __p1_682; \ + __ret_682 = vqrdmulhh_s16(__s0_682, vget_lane_s16(__s1_682, __p2_682)); \ + __ret_682; \ +}) +#else +#define vqrdmulhh_lane_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \ + int16_t __ret_683; \ + int16_t __s0_683 = __p0_683; \ + int16x4_t __s1_683 = __p1_683; \ + int16x4_t __rev1_683; __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, 3, 2, 1, 0); \ + __ret_683 = vqrdmulhh_s16(__s0_683, __noswap_vget_lane_s16(__rev1_683, __p2_683)); \ + __ret_683; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhs_laneq_s32(__p0_684, __p1_684, __p2_684) __extension__ ({ \ + int32_t __ret_684; \ + int32_t __s0_684 = __p0_684; \ + int32x4_t __s1_684 = __p1_684; \ + __ret_684 = vqrdmulhs_s32(__s0_684, vgetq_lane_s32(__s1_684, __p2_684)); \ + __ret_684; \ +}) +#else +#define vqrdmulhs_laneq_s32(__p0_685, __p1_685, __p2_685) __extension__ ({ \ + int32_t __ret_685; \ + int32_t __s0_685 = __p0_685; \ + int32x4_t __s1_685 = __p1_685; \ + int32x4_t __rev1_685; __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, 3, 2, 1, 0); \ + __ret_685 = vqrdmulhs_s32(__s0_685, __noswap_vgetq_lane_s32(__rev1_685, __p2_685)); \ + __ret_685; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhh_laneq_s16(__p0_686, __p1_686, __p2_686) __extension__ ({ \ + int16_t __ret_686; \ + int16_t __s0_686 = __p0_686; \ + int16x8_t __s1_686 = __p1_686; \ + __ret_686 = vqrdmulhh_s16(__s0_686, vgetq_lane_s16(__s1_686, __p2_686)); \ + __ret_686; \ +}) +#else +#define vqrdmulhh_laneq_s16(__p0_687, __p1_687, __p2_687) __extension__ ({ \ + int16_t __ret_687; \ + int16_t __s0_687 = __p0_687; \ + int16x8_t __s1_687 = __p1_687; \ + int16x8_t __rev1_687; __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_687 = vqrdmulhh_s16(__s0_687, __noswap_vgetq_lane_s16(__rev1_687, __p2_687)); \ + __ret_687; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) +#else +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, 
__p2, 33); \ + __ret; \ +}) +#else +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ + __ret; \ +}) +#else +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +__ai __attribute__((target("neon"))) uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) 
__builtin_neon_vqrshlh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u32(__p0_688, __p1_688, __p2_688) __extension__ ({ \ + uint16x8_t __ret_688; \ + uint16x4_t __s0_688 = __p0_688; \ + uint32x4_t __s1_688 = __p1_688; \ + __ret_688 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_688), (uint16x4_t)(vqrshrn_n_u32(__s1_688, __p2_688)))); \ + __ret_688; \ +}) +#else +#define vqrshrn_high_n_u32(__p0_689, __p1_689, __p2_689) __extension__ ({ \ + uint16x8_t __ret_689; \ + uint16x4_t __s0_689 = __p0_689; \ + uint32x4_t __s1_689 = __p1_689; \ + uint16x4_t __rev0_689; __rev0_689 = __builtin_shufflevector(__s0_689, __s0_689, 3, 2, 1, 0); \ + uint32x4_t __rev1_689; __rev1_689 = __builtin_shufflevector(__s1_689, __s1_689, 3, 2, 1, 0); \ + __ret_689 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_689), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_689, __p2_689)))); \ + __ret_689 = __builtin_shufflevector(__ret_689, __ret_689, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_689; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u64(__p0_690, __p1_690, __p2_690) __extension__ ({ \ + uint32x4_t __ret_690; \ + uint32x2_t __s0_690 = __p0_690; \ + uint64x2_t __s1_690 = __p1_690; \ + __ret_690 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_690), (uint32x2_t)(vqrshrn_n_u64(__s1_690, __p2_690)))); \ + __ret_690; \ +}) +#else +#define vqrshrn_high_n_u64(__p0_691, __p1_691, __p2_691) __extension__ ({ \ + uint32x4_t __ret_691; \ + uint32x2_t __s0_691 = __p0_691; \ + uint64x2_t __s1_691 = __p1_691; \ + uint32x2_t __rev0_691; __rev0_691 = __builtin_shufflevector(__s0_691, __s0_691, 1, 0); \ + uint64x2_t __rev1_691; __rev1_691 = __builtin_shufflevector(__s1_691, __s1_691, 1, 0); \ + __ret_691 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_691), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_691, __p2_691)))); \ + __ret_691 = __builtin_shufflevector(__ret_691, __ret_691, 3, 2, 1, 0); \ + __ret_691; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_u16(__p0_692, __p1_692, __p2_692) __extension__ ({ \ + uint8x16_t __ret_692; \ + uint8x8_t __s0_692 = __p0_692; \ + uint16x8_t __s1_692 = __p1_692; \ + __ret_692 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_692), (uint8x8_t)(vqrshrn_n_u16(__s1_692, __p2_692)))); \ + __ret_692; \ +}) +#else +#define vqrshrn_high_n_u16(__p0_693, __p1_693, __p2_693) __extension__ ({ \ + uint8x16_t __ret_693; \ + uint8x8_t __s0_693 = __p0_693; \ + uint16x8_t __s1_693 = __p1_693; \ + uint8x8_t __rev0_693; __rev0_693 = __builtin_shufflevector(__s0_693, __s0_693, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_693; __rev1_693 = __builtin_shufflevector(__s1_693, __s1_693, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_693 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_693), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_693, __p2_693)))); \ + __ret_693 = __builtin_shufflevector(__ret_693, __ret_693, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_693; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s32(__p0_694, __p1_694, __p2_694) __extension__ ({ \ + int16x8_t __ret_694; \ + int16x4_t __s0_694 = __p0_694; \ + int32x4_t __s1_694 = __p1_694; \ + __ret_694 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_694), (int16x4_t)(vqrshrn_n_s32(__s1_694, __p2_694)))); \ + __ret_694; \ +}) +#else +#define vqrshrn_high_n_s32(__p0_695, __p1_695, __p2_695) __extension__ ({ \ + int16x8_t __ret_695; \ + int16x4_t __s0_695 = __p0_695; \ + int32x4_t __s1_695 = __p1_695; \ + int16x4_t __rev0_695; __rev0_695 = 
__builtin_shufflevector(__s0_695, __s0_695, 3, 2, 1, 0); \ + int32x4_t __rev1_695; __rev1_695 = __builtin_shufflevector(__s1_695, __s1_695, 3, 2, 1, 0); \ + __ret_695 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_695), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_695, __p2_695)))); \ + __ret_695 = __builtin_shufflevector(__ret_695, __ret_695, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_695; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s64(__p0_696, __p1_696, __p2_696) __extension__ ({ \ + int32x4_t __ret_696; \ + int32x2_t __s0_696 = __p0_696; \ + int64x2_t __s1_696 = __p1_696; \ + __ret_696 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_696), (int32x2_t)(vqrshrn_n_s64(__s1_696, __p2_696)))); \ + __ret_696; \ +}) +#else +#define vqrshrn_high_n_s64(__p0_697, __p1_697, __p2_697) __extension__ ({ \ + int32x4_t __ret_697; \ + int32x2_t __s0_697 = __p0_697; \ + int64x2_t __s1_697 = __p1_697; \ + int32x2_t __rev0_697; __rev0_697 = __builtin_shufflevector(__s0_697, __s0_697, 1, 0); \ + int64x2_t __rev1_697; __rev1_697 = __builtin_shufflevector(__s1_697, __s1_697, 1, 0); \ + __ret_697 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_697), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_697, __p2_697)))); \ + __ret_697 = __builtin_shufflevector(__ret_697, __ret_697, 3, 2, 1, 0); \ + __ret_697; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrn_high_n_s16(__p0_698, __p1_698, __p2_698) __extension__ ({ \ + int8x16_t __ret_698; \ + int8x8_t __s0_698 = __p0_698; \ + int16x8_t __s1_698 = __p1_698; \ + __ret_698 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_698), (int8x8_t)(vqrshrn_n_s16(__s1_698, __p2_698)))); \ + __ret_698; \ +}) +#else +#define vqrshrn_high_n_s16(__p0_699, __p1_699, __p2_699) __extension__ ({ \ + int8x16_t __ret_699; \ + int8x8_t __s0_699 = __p0_699; \ + int16x8_t __s1_699 = __p1_699; \ + int8x8_t __rev0_699; __rev0_699 = __builtin_shufflevector(__s0_699, __s0_699, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_699; __rev1_699 = __builtin_shufflevector(__s1_699, __s1_699, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_699 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_699), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_699, __p2_699)))); \ + __ret_699 = __builtin_shufflevector(__ret_699, __ret_699, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_699; \ +}) +#endif + +#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s32(__p0_700, __p1_700, __p2_700) __extension__ ({ \ + int16x8_t __ret_700; \ + 
int16x4_t __s0_700 = __p0_700; \ + int32x4_t __s1_700 = __p1_700; \ + __ret_700 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_700), (int16x4_t)(vqrshrun_n_s32(__s1_700, __p2_700)))); \ + __ret_700; \ +}) +#else +#define vqrshrun_high_n_s32(__p0_701, __p1_701, __p2_701) __extension__ ({ \ + int16x8_t __ret_701; \ + int16x4_t __s0_701 = __p0_701; \ + int32x4_t __s1_701 = __p1_701; \ + int16x4_t __rev0_701; __rev0_701 = __builtin_shufflevector(__s0_701, __s0_701, 3, 2, 1, 0); \ + int32x4_t __rev1_701; __rev1_701 = __builtin_shufflevector(__s1_701, __s1_701, 3, 2, 1, 0); \ + __ret_701 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_701), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_701, __p2_701)))); \ + __ret_701 = __builtin_shufflevector(__ret_701, __ret_701, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_701; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s64(__p0_702, __p1_702, __p2_702) __extension__ ({ \ + int32x4_t __ret_702; \ + int32x2_t __s0_702 = __p0_702; \ + int64x2_t __s1_702 = __p1_702; \ + __ret_702 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_702), (int32x2_t)(vqrshrun_n_s64(__s1_702, __p2_702)))); \ + __ret_702; \ +}) +#else +#define vqrshrun_high_n_s64(__p0_703, __p1_703, __p2_703) __extension__ ({ \ + int32x4_t __ret_703; \ + int32x2_t __s0_703 = __p0_703; \ + int64x2_t __s1_703 = __p1_703; \ + int32x2_t __rev0_703; __rev0_703 = __builtin_shufflevector(__s0_703, __s0_703, 1, 0); \ + int64x2_t __rev1_703; __rev1_703 = __builtin_shufflevector(__s1_703, __s1_703, 1, 0); \ + __ret_703 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_703), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_703, __p2_703)))); \ + __ret_703 = __builtin_shufflevector(__ret_703, __ret_703, 3, 2, 1, 0); \ + __ret_703; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s16(__p0_704, __p1_704, __p2_704) __extension__ ({ \ + int8x16_t __ret_704; \ + int8x8_t __s0_704 = __p0_704; \ + int16x8_t __s1_704 = __p1_704; \ + __ret_704 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_704), (int8x8_t)(vqrshrun_n_s16(__s1_704, __p2_704)))); \ + __ret_704; \ +}) +#else +#define vqrshrun_high_n_s16(__p0_705, __p1_705, __p2_705) __extension__ ({ \ + int8x16_t __ret_705; \ + int8x8_t __s0_705 = __p0_705; \ + int16x8_t __s1_705 = __p1_705; \ + int8x8_t __rev0_705; __rev0_705 = __builtin_shufflevector(__s0_705, __s0_705, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_705; __rev1_705 = __builtin_shufflevector(__s1_705, __s1_705, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_705 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_705), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_705, __p2_705)))); \ + __ret_705 = __builtin_shufflevector(__ret_705, __ret_705, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_705; \ +}) +#endif + +#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +__ai __attribute__((target("neon"))) uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t 
vqshls_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); + return __ret; +} +#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ + __ret; \ +}) +#define vqshls_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ + __ret; \ +}) +#define vqshls_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ + __ret; \ +}) +#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ + __ret; \ +}) +#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u32(__p0_706, __p1_706, __p2_706) __extension__ ({ \ + uint16x8_t __ret_706; \ + uint16x4_t __s0_706 = __p0_706; \ + uint32x4_t __s1_706 = __p1_706; \ + __ret_706 = 
(uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_706), (uint16x4_t)(vqshrn_n_u32(__s1_706, __p2_706)))); \ + __ret_706; \ +}) +#else +#define vqshrn_high_n_u32(__p0_707, __p1_707, __p2_707) __extension__ ({ \ + uint16x8_t __ret_707; \ + uint16x4_t __s0_707 = __p0_707; \ + uint32x4_t __s1_707 = __p1_707; \ + uint16x4_t __rev0_707; __rev0_707 = __builtin_shufflevector(__s0_707, __s0_707, 3, 2, 1, 0); \ + uint32x4_t __rev1_707; __rev1_707 = __builtin_shufflevector(__s1_707, __s1_707, 3, 2, 1, 0); \ + __ret_707 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_707), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_707, __p2_707)))); \ + __ret_707 = __builtin_shufflevector(__ret_707, __ret_707, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_707; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u64(__p0_708, __p1_708, __p2_708) __extension__ ({ \ + uint32x4_t __ret_708; \ + uint32x2_t __s0_708 = __p0_708; \ + uint64x2_t __s1_708 = __p1_708; \ + __ret_708 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_708), (uint32x2_t)(vqshrn_n_u64(__s1_708, __p2_708)))); \ + __ret_708; \ +}) +#else +#define vqshrn_high_n_u64(__p0_709, __p1_709, __p2_709) __extension__ ({ \ + uint32x4_t __ret_709; \ + uint32x2_t __s0_709 = __p0_709; \ + uint64x2_t __s1_709 = __p1_709; \ + uint32x2_t __rev0_709; __rev0_709 = __builtin_shufflevector(__s0_709, __s0_709, 1, 0); \ + uint64x2_t __rev1_709; __rev1_709 = __builtin_shufflevector(__s1_709, __s1_709, 1, 0); \ + __ret_709 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_709), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_709, __p2_709)))); \ + __ret_709 = __builtin_shufflevector(__ret_709, __ret_709, 3, 2, 1, 0); \ + __ret_709; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u16(__p0_710, __p1_710, __p2_710) __extension__ ({ \ + uint8x16_t __ret_710; \ + uint8x8_t __s0_710 = __p0_710; \ + uint16x8_t __s1_710 = __p1_710; \ + __ret_710 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_710), (uint8x8_t)(vqshrn_n_u16(__s1_710, __p2_710)))); \ + __ret_710; \ +}) +#else +#define vqshrn_high_n_u16(__p0_711, __p1_711, __p2_711) __extension__ ({ \ + uint8x16_t __ret_711; \ + uint8x8_t __s0_711 = __p0_711; \ + uint16x8_t __s1_711 = __p1_711; \ + uint8x8_t __rev0_711; __rev0_711 = __builtin_shufflevector(__s0_711, __s0_711, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_711; __rev1_711 = __builtin_shufflevector(__s1_711, __s1_711, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_711 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_711), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_711, __p2_711)))); \ + __ret_711 = __builtin_shufflevector(__ret_711, __ret_711, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_711; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s32(__p0_712, __p1_712, __p2_712) __extension__ ({ \ + int16x8_t __ret_712; \ + int16x4_t __s0_712 = __p0_712; \ + int32x4_t __s1_712 = __p1_712; \ + __ret_712 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_712), (int16x4_t)(vqshrn_n_s32(__s1_712, __p2_712)))); \ + __ret_712; \ +}) +#else +#define vqshrn_high_n_s32(__p0_713, __p1_713, __p2_713) __extension__ ({ \ + int16x8_t __ret_713; \ + int16x4_t __s0_713 = __p0_713; \ + int32x4_t __s1_713 = __p1_713; \ + int16x4_t __rev0_713; __rev0_713 = __builtin_shufflevector(__s0_713, __s0_713, 3, 2, 1, 0); \ + int32x4_t __rev1_713; __rev1_713 = __builtin_shufflevector(__s1_713, __s1_713, 3, 2, 1, 0); \ + __ret_713 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_713), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_713, __p2_713)))); \ + __ret_713 = 
__builtin_shufflevector(__ret_713, __ret_713, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_713; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s64(__p0_714, __p1_714, __p2_714) __extension__ ({ \ + int32x4_t __ret_714; \ + int32x2_t __s0_714 = __p0_714; \ + int64x2_t __s1_714 = __p1_714; \ + __ret_714 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_714), (int32x2_t)(vqshrn_n_s64(__s1_714, __p2_714)))); \ + __ret_714; \ +}) +#else +#define vqshrn_high_n_s64(__p0_715, __p1_715, __p2_715) __extension__ ({ \ + int32x4_t __ret_715; \ + int32x2_t __s0_715 = __p0_715; \ + int64x2_t __s1_715 = __p1_715; \ + int32x2_t __rev0_715; __rev0_715 = __builtin_shufflevector(__s0_715, __s0_715, 1, 0); \ + int64x2_t __rev1_715; __rev1_715 = __builtin_shufflevector(__s1_715, __s1_715, 1, 0); \ + __ret_715 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_715), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_715, __p2_715)))); \ + __ret_715 = __builtin_shufflevector(__ret_715, __ret_715, 3, 2, 1, 0); \ + __ret_715; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s16(__p0_716, __p1_716, __p2_716) __extension__ ({ \ + int8x16_t __ret_716; \ + int8x8_t __s0_716 = __p0_716; \ + int16x8_t __s1_716 = __p1_716; \ + __ret_716 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_716), (int8x8_t)(vqshrn_n_s16(__s1_716, __p2_716)))); \ + __ret_716; \ +}) +#else +#define vqshrn_high_n_s16(__p0_717, __p1_717, __p2_717) __extension__ ({ \ + int8x16_t __ret_717; \ + int8x8_t __s0_717 = __p0_717; \ + int16x8_t __s1_717 = __p1_717; \ + int8x8_t __rev0_717; __rev0_717 = __builtin_shufflevector(__s0_717, __s0_717, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_717; __rev1_717 = __builtin_shufflevector(__s1_717, __s1_717, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_717 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_717), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_717, __p2_717)))); \ + __ret_717 = __builtin_shufflevector(__ret_717, __ret_717, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_717; \ +}) +#endif + +#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s32(__p0_718, __p1_718, __p2_718) __extension__ ({ \ + int16x8_t __ret_718; \ + int16x4_t __s0_718 = __p0_718; \ + int32x4_t __s1_718 = __p1_718; \ + __ret_718 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_718), (int16x4_t)(vqshrun_n_s32(__s1_718, __p2_718)))); \ + __ret_718; \ +}) +#else +#define vqshrun_high_n_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \ + int16x8_t __ret_719; \ + int16x4_t __s0_719 = 
__p0_719; \ + int32x4_t __s1_719 = __p1_719; \ + int16x4_t __rev0_719; __rev0_719 = __builtin_shufflevector(__s0_719, __s0_719, 3, 2, 1, 0); \ + int32x4_t __rev1_719; __rev1_719 = __builtin_shufflevector(__s1_719, __s1_719, 3, 2, 1, 0); \ + __ret_719 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_719), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_719, __p2_719)))); \ + __ret_719 = __builtin_shufflevector(__ret_719, __ret_719, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_719; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s64(__p0_720, __p1_720, __p2_720) __extension__ ({ \ + int32x4_t __ret_720; \ + int32x2_t __s0_720 = __p0_720; \ + int64x2_t __s1_720 = __p1_720; \ + __ret_720 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_720), (int32x2_t)(vqshrun_n_s64(__s1_720, __p2_720)))); \ + __ret_720; \ +}) +#else +#define vqshrun_high_n_s64(__p0_721, __p1_721, __p2_721) __extension__ ({ \ + int32x4_t __ret_721; \ + int32x2_t __s0_721 = __p0_721; \ + int64x2_t __s1_721 = __p1_721; \ + int32x2_t __rev0_721; __rev0_721 = __builtin_shufflevector(__s0_721, __s0_721, 1, 0); \ + int64x2_t __rev1_721; __rev1_721 = __builtin_shufflevector(__s1_721, __s1_721, 1, 0); \ + __ret_721 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_721), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_721, __p2_721)))); \ + __ret_721 = __builtin_shufflevector(__ret_721, __ret_721, 3, 2, 1, 0); \ + __ret_721; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \ + int8x16_t __ret_722; \ + int8x8_t __s0_722 = __p0_722; \ + int16x8_t __s1_722 = __p1_722; \ + __ret_722 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_722), (int8x8_t)(vqshrun_n_s16(__s1_722, __p2_722)))); \ + __ret_722; \ +}) +#else +#define vqshrun_high_n_s16(__p0_723, __p1_723, __p2_723) __extension__ ({ \ + int8x16_t __ret_723; \ + int8x8_t __s0_723 = __p0_723; \ + int16x8_t __s1_723 = __p1_723; \ + int8x8_t __rev0_723; __rev0_723 = __builtin_shufflevector(__s0_723, __s0_723, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_723; __rev1_723 = __builtin_shufflevector(__s1_723, __s1_723, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_723 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_723), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_723, __p2_723)))); \ + __ret_723 = __builtin_shufflevector(__ret_723, __ret_723, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_723; \ +}) +#endif + +#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +__ai __attribute__((target("neon"))) uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) 
uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else 
+__ai __attribute__((target("neon"))) int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], 
__p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { 
+ uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t 
vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], 
(int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t 
__p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t 
vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ 
+__ai __attribute__((target("neon"))) uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], 
__p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, 
(int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t 
__rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 
4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrecpe_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); + return __ret; +} +__ai __attribute__((target("neon"))) float64_t vrecped_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vrecpes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai 
__attribute__((target("neon"))) float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai __attribute__((target("neon"))) float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) float64_t vrecpxd_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vrecpxs_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { + poly64x1_t __ret; + __ret = 
(poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t 
vreinterpretq_p8_p128(poly128_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + 
return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t 
vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = 
(uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t 
vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); 
+ return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { + float16x8_t __ret; + __ret = 
(float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = 
(int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { + uint32x2_t __ret; + __ret 
= (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) 
uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u32(uint32x2_t 
__p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + 
__ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; 
+ __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); + return __ret; +} +#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u32(__p0_724, __p1_724, __p2_724) __extension__ ({ \ + uint16x8_t __ret_724; \ + uint16x4_t __s0_724 = __p0_724; \ + uint32x4_t __s1_724 = __p1_724; \ + __ret_724 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_724), (uint16x4_t)(vrshrn_n_u32(__s1_724, __p2_724)))); \ + __ret_724; \ +}) +#else +#define vrshrn_high_n_u32(__p0_725, __p1_725, __p2_725) __extension__ ({ \ + uint16x8_t __ret_725; \ + uint16x4_t __s0_725 = 
__p0_725; \ + uint32x4_t __s1_725 = __p1_725; \ + uint16x4_t __rev0_725; __rev0_725 = __builtin_shufflevector(__s0_725, __s0_725, 3, 2, 1, 0); \ + uint32x4_t __rev1_725; __rev1_725 = __builtin_shufflevector(__s1_725, __s1_725, 3, 2, 1, 0); \ + __ret_725 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_725), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_725, __p2_725)))); \ + __ret_725 = __builtin_shufflevector(__ret_725, __ret_725, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_725; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u64(__p0_726, __p1_726, __p2_726) __extension__ ({ \ + uint32x4_t __ret_726; \ + uint32x2_t __s0_726 = __p0_726; \ + uint64x2_t __s1_726 = __p1_726; \ + __ret_726 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_726), (uint32x2_t)(vrshrn_n_u64(__s1_726, __p2_726)))); \ + __ret_726; \ +}) +#else +#define vrshrn_high_n_u64(__p0_727, __p1_727, __p2_727) __extension__ ({ \ + uint32x4_t __ret_727; \ + uint32x2_t __s0_727 = __p0_727; \ + uint64x2_t __s1_727 = __p1_727; \ + uint32x2_t __rev0_727; __rev0_727 = __builtin_shufflevector(__s0_727, __s0_727, 1, 0); \ + uint64x2_t __rev1_727; __rev1_727 = __builtin_shufflevector(__s1_727, __s1_727, 1, 0); \ + __ret_727 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_727), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_727, __p2_727)))); \ + __ret_727 = __builtin_shufflevector(__ret_727, __ret_727, 3, 2, 1, 0); \ + __ret_727; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u16(__p0_728, __p1_728, __p2_728) __extension__ ({ \ + uint8x16_t __ret_728; \ + uint8x8_t __s0_728 = __p0_728; \ + uint16x8_t __s1_728 = __p1_728; \ + __ret_728 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_728), (uint8x8_t)(vrshrn_n_u16(__s1_728, __p2_728)))); \ + __ret_728; \ +}) +#else +#define vrshrn_high_n_u16(__p0_729, __p1_729, __p2_729) __extension__ ({ \ + uint8x16_t __ret_729; \ + uint8x8_t __s0_729 = __p0_729; \ + uint16x8_t __s1_729 = __p1_729; \ + uint8x8_t __rev0_729; __rev0_729 = __builtin_shufflevector(__s0_729, __s0_729, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_729; __rev1_729 = __builtin_shufflevector(__s1_729, __s1_729, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_729 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_729), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_729, __p2_729)))); \ + __ret_729 = __builtin_shufflevector(__ret_729, __ret_729, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_729; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s32(__p0_730, __p1_730, __p2_730) __extension__ ({ \ + int16x8_t __ret_730; \ + int16x4_t __s0_730 = __p0_730; \ + int32x4_t __s1_730 = __p1_730; \ + __ret_730 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_730), (int16x4_t)(vrshrn_n_s32(__s1_730, __p2_730)))); \ + __ret_730; \ +}) +#else +#define vrshrn_high_n_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \ + int16x8_t __ret_731; \ + int16x4_t __s0_731 = __p0_731; \ + int32x4_t __s1_731 = __p1_731; \ + int16x4_t __rev0_731; __rev0_731 = __builtin_shufflevector(__s0_731, __s0_731, 3, 2, 1, 0); \ + int32x4_t __rev1_731; __rev1_731 = __builtin_shufflevector(__s1_731, __s1_731, 3, 2, 1, 0); \ + __ret_731 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_731), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_731, __p2_731)))); \ + __ret_731 = __builtin_shufflevector(__ret_731, __ret_731, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_731; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s64(__p0_732, __p1_732, __p2_732) __extension__ ({ \ + int32x4_t __ret_732; \ + int32x2_t __s0_732 = __p0_732; 
\ + int64x2_t __s1_732 = __p1_732; \ + __ret_732 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_732), (int32x2_t)(vrshrn_n_s64(__s1_732, __p2_732)))); \ + __ret_732; \ +}) +#else +#define vrshrn_high_n_s64(__p0_733, __p1_733, __p2_733) __extension__ ({ \ + int32x4_t __ret_733; \ + int32x2_t __s0_733 = __p0_733; \ + int64x2_t __s1_733 = __p1_733; \ + int32x2_t __rev0_733; __rev0_733 = __builtin_shufflevector(__s0_733, __s0_733, 1, 0); \ + int64x2_t __rev1_733; __rev1_733 = __builtin_shufflevector(__s1_733, __s1_733, 1, 0); \ + __ret_733 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_733), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_733, __p2_733)))); \ + __ret_733 = __builtin_shufflevector(__ret_733, __ret_733, 3, 2, 1, 0); \ + __ret_733; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \ + int8x16_t __ret_734; \ + int8x8_t __s0_734 = __p0_734; \ + int16x8_t __s1_734 = __p1_734; \ + __ret_734 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_734), (int8x8_t)(vrshrn_n_s16(__s1_734, __p2_734)))); \ + __ret_734; \ +}) +#else +#define vrshrn_high_n_s16(__p0_735, __p1_735, __p2_735) __extension__ ({ \ + int8x16_t __ret_735; \ + int8x8_t __s0_735 = __p0_735; \ + int16x8_t __s1_735 = __p1_735; \ + int8x8_t __rev0_735; __rev0_735 = __builtin_shufflevector(__s0_735, __s0_735, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_735; __rev1_735 = __builtin_shufflevector(__s1_735, __s1_735, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_735 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_735), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_735, __p2_735)))); \ + __ret_735 = __builtin_shufflevector(__ret_735, __ret_735, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_735; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrsqrte_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); + return __ret; +} +__ai __attribute__((target("neon"))) float64_t vrsqrted_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vrsqrtes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return 
__ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai __attribute__((target("neon"))) float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); + return __ret; +} +#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \ + __ret; \ +}) +__ai __attribute__((target("neon"))) uint64_t vshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); + return __ret; +} +#define vshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u8(__p0_736, __p1_736) __extension__ ({ \ + uint16x8_t __ret_736; \ + uint8x16_t __s0_736 = __p0_736; \ + __ret_736 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_736), __p1_736)); \ + __ret_736; \ +}) +#else +#define vshll_high_n_u8(__p0_737, __p1_737) __extension__ ({ \ + uint16x8_t __ret_737; \ + uint8x16_t __s0_737 = __p0_737; \ + uint8x16_t __rev0_737; __rev0_737 = __builtin_shufflevector(__s0_737, __s0_737, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_737 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_737), __p1_737)); \ + __ret_737 = __builtin_shufflevector(__ret_737, __ret_737, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_737; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u32(__p0_738, __p1_738) __extension__ ({ \ + uint64x2_t __ret_738; \ + uint32x4_t __s0_738 = __p0_738; \ + __ret_738 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_738), __p1_738)); \ + __ret_738; \ +}) +#else +#define vshll_high_n_u32(__p0_739, __p1_739) __extension__ ({ \ + uint64x2_t __ret_739; \ + uint32x4_t __s0_739 = __p0_739; \ + uint32x4_t __rev0_739; __rev0_739 = __builtin_shufflevector(__s0_739, __s0_739, 3, 2, 1, 0); \ + __ret_739 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_739), __p1_739)); \ + __ret_739 = __builtin_shufflevector(__ret_739, __ret_739, 1, 0); \ + __ret_739; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u16(__p0_740, __p1_740) __extension__ ({ \ + uint32x4_t __ret_740; \ + uint16x8_t __s0_740 = __p0_740; \ + __ret_740 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_740), __p1_740)); \ + __ret_740; \ +}) +#else +#define vshll_high_n_u16(__p0_741, __p1_741) 
__extension__ ({ \ + uint32x4_t __ret_741; \ + uint16x8_t __s0_741 = __p0_741; \ + uint16x8_t __rev0_741; __rev0_741 = __builtin_shufflevector(__s0_741, __s0_741, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_741 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_741), __p1_741)); \ + __ret_741 = __builtin_shufflevector(__ret_741, __ret_741, 3, 2, 1, 0); \ + __ret_741; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s8(__p0_742, __p1_742) __extension__ ({ \ + int16x8_t __ret_742; \ + int8x16_t __s0_742 = __p0_742; \ + __ret_742 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_742), __p1_742)); \ + __ret_742; \ +}) +#else +#define vshll_high_n_s8(__p0_743, __p1_743) __extension__ ({ \ + int16x8_t __ret_743; \ + int8x16_t __s0_743 = __p0_743; \ + int8x16_t __rev0_743; __rev0_743 = __builtin_shufflevector(__s0_743, __s0_743, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_743 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_743), __p1_743)); \ + __ret_743 = __builtin_shufflevector(__ret_743, __ret_743, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_743; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s32(__p0_744, __p1_744) __extension__ ({ \ + int64x2_t __ret_744; \ + int32x4_t __s0_744 = __p0_744; \ + __ret_744 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_744), __p1_744)); \ + __ret_744; \ +}) +#else +#define vshll_high_n_s32(__p0_745, __p1_745) __extension__ ({ \ + int64x2_t __ret_745; \ + int32x4_t __s0_745 = __p0_745; \ + int32x4_t __rev0_745; __rev0_745 = __builtin_shufflevector(__s0_745, __s0_745, 3, 2, 1, 0); \ + __ret_745 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_745), __p1_745)); \ + __ret_745 = __builtin_shufflevector(__ret_745, __ret_745, 1, 0); \ + __ret_745; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_s16(__p0_746, __p1_746) __extension__ ({ \ + int32x4_t __ret_746; \ + int16x8_t __s0_746 = __p0_746; \ + __ret_746 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_746), __p1_746)); \ + __ret_746; \ +}) +#else +#define vshll_high_n_s16(__p0_747, __p1_747) __extension__ ({ \ + int32x4_t __ret_747; \ + int16x8_t __s0_747 = __p0_747; \ + int16x8_t __rev0_747; __rev0_747 = __builtin_shufflevector(__s0_747, __s0_747, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_747 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_747), __p1_747)); \ + __ret_747 = __builtin_shufflevector(__ret_747, __ret_747, 3, 2, 1, 0); \ + __ret_747; \ +}) +#endif + +#define vshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u32(__p0_748, __p1_748, __p2_748) __extension__ ({ \ + uint16x8_t __ret_748; \ + uint16x4_t __s0_748 = __p0_748; \ + uint32x4_t __s1_748 = __p1_748; \ + __ret_748 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_748), (uint16x4_t)(vshrn_n_u32(__s1_748, __p2_748)))); \ + __ret_748; \ +}) +#else +#define vshrn_high_n_u32(__p0_749, __p1_749, __p2_749) __extension__ ({ \ + uint16x8_t __ret_749; \ + uint16x4_t __s0_749 = __p0_749; \ + uint32x4_t __s1_749 = __p1_749; \ + uint16x4_t __rev0_749; __rev0_749 = __builtin_shufflevector(__s0_749, __s0_749, 3, 2, 1, 0); \ + uint32x4_t __rev1_749; __rev1_749 = __builtin_shufflevector(__s1_749, __s1_749, 3, 2, 1, 0); \ + __ret_749 = 
(uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_749), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_749, __p2_749)))); \ + __ret_749 = __builtin_shufflevector(__ret_749, __ret_749, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_749; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u64(__p0_750, __p1_750, __p2_750) __extension__ ({ \ + uint32x4_t __ret_750; \ + uint32x2_t __s0_750 = __p0_750; \ + uint64x2_t __s1_750 = __p1_750; \ + __ret_750 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_750), (uint32x2_t)(vshrn_n_u64(__s1_750, __p2_750)))); \ + __ret_750; \ +}) +#else +#define vshrn_high_n_u64(__p0_751, __p1_751, __p2_751) __extension__ ({ \ + uint32x4_t __ret_751; \ + uint32x2_t __s0_751 = __p0_751; \ + uint64x2_t __s1_751 = __p1_751; \ + uint32x2_t __rev0_751; __rev0_751 = __builtin_shufflevector(__s0_751, __s0_751, 1, 0); \ + uint64x2_t __rev1_751; __rev1_751 = __builtin_shufflevector(__s1_751, __s1_751, 1, 0); \ + __ret_751 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_751), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_751, __p2_751)))); \ + __ret_751 = __builtin_shufflevector(__ret_751, __ret_751, 3, 2, 1, 0); \ + __ret_751; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_u16(__p0_752, __p1_752, __p2_752) __extension__ ({ \ + uint8x16_t __ret_752; \ + uint8x8_t __s0_752 = __p0_752; \ + uint16x8_t __s1_752 = __p1_752; \ + __ret_752 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_752), (uint8x8_t)(vshrn_n_u16(__s1_752, __p2_752)))); \ + __ret_752; \ +}) +#else +#define vshrn_high_n_u16(__p0_753, __p1_753, __p2_753) __extension__ ({ \ + uint8x16_t __ret_753; \ + uint8x8_t __s0_753 = __p0_753; \ + uint16x8_t __s1_753 = __p1_753; \ + uint8x8_t __rev0_753; __rev0_753 = __builtin_shufflevector(__s0_753, __s0_753, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_753; __rev1_753 = __builtin_shufflevector(__s1_753, __s1_753, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_753 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_753), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_753, __p2_753)))); \ + __ret_753 = __builtin_shufflevector(__ret_753, __ret_753, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_753; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s32(__p0_754, __p1_754, __p2_754) __extension__ ({ \ + int16x8_t __ret_754; \ + int16x4_t __s0_754 = __p0_754; \ + int32x4_t __s1_754 = __p1_754; \ + __ret_754 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_754), (int16x4_t)(vshrn_n_s32(__s1_754, __p2_754)))); \ + __ret_754; \ +}) +#else +#define vshrn_high_n_s32(__p0_755, __p1_755, __p2_755) __extension__ ({ \ + int16x8_t __ret_755; \ + int16x4_t __s0_755 = __p0_755; \ + int32x4_t __s1_755 = __p1_755; \ + int16x4_t __rev0_755; __rev0_755 = __builtin_shufflevector(__s0_755, __s0_755, 3, 2, 1, 0); \ + int32x4_t __rev1_755; __rev1_755 = __builtin_shufflevector(__s1_755, __s1_755, 3, 2, 1, 0); \ + __ret_755 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_755), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_755, __p2_755)))); \ + __ret_755 = __builtin_shufflevector(__ret_755, __ret_755, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_755; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s64(__p0_756, __p1_756, __p2_756) __extension__ ({ \ + int32x4_t __ret_756; \ + int32x2_t __s0_756 = __p0_756; \ + int64x2_t __s1_756 = __p1_756; \ + __ret_756 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_756), (int32x2_t)(vshrn_n_s64(__s1_756, __p2_756)))); \ + __ret_756; \ +}) +#else +#define vshrn_high_n_s64(__p0_757, __p1_757, __p2_757) __extension__ ({ \ + int32x4_t 
__ret_757; \ + int32x2_t __s0_757 = __p0_757; \ + int64x2_t __s1_757 = __p1_757; \ + int32x2_t __rev0_757; __rev0_757 = __builtin_shufflevector(__s0_757, __s0_757, 1, 0); \ + int64x2_t __rev1_757; __rev1_757 = __builtin_shufflevector(__s1_757, __s1_757, 1, 0); \ + __ret_757 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_757), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_757, __p2_757)))); \ + __ret_757 = __builtin_shufflevector(__ret_757, __ret_757, 3, 2, 1, 0); \ + __ret_757; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vshrn_high_n_s16(__p0_758, __p1_758, __p2_758) __extension__ ({ \ + int8x16_t __ret_758; \ + int8x8_t __s0_758 = __p0_758; \ + int16x8_t __s1_758 = __p1_758; \ + __ret_758 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_758), (int8x8_t)(vshrn_n_s16(__s1_758, __p2_758)))); \ + __ret_758; \ +}) +#else +#define vshrn_high_n_s16(__p0_759, __p1_759, __p2_759) __extension__ ({ \ + int8x16_t __ret_759; \ + int8x8_t __s0_759 = __p0_759; \ + int16x8_t __s1_759 = __p1_759; \ + int8x8_t __rev0_759; __rev0_759 = __builtin_shufflevector(__s0_759, __s0_759, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_759; __rev1_759 = __builtin_shufflevector(__s1_759, __s1_759, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_759 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_759), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_759, __p2_759)))); \ + __ret_759 = __builtin_shufflevector(__ret_759, __ret_759, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_759; \ +}) +#endif + +#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +__ai __attribute__((target("neon"))) uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) { + 
uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vsqrt_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 
0); + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vst1_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \ +}) +#else +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \ +}) +#else +#define vst1q_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \ +}) +#endif + +#define vst1_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ +}) +#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ +}) +#else +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 
1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ +}) +#else +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ +}) +#endif + +#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ +}) +#define vst1_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ +}) +#else +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ +}) +#else +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ +}) +#endif + +#define vst1_f64_x2(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ +}) +#define vst1_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) +#else +#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ +}) +#else +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ +}) +#endif + +#define vst1_f64_x3(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ +}) +#define vst1_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ +}) +#else +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ +}) +#else +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ +}) +#endif + +#define vst1_f64_x4(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ +}) +#define vst2_p64(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ +}) +#else +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +}) +#else +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + 
uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ +}) +#else +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ +}) +#else +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ +}) +#endif + +#define vst2_f64(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ +}) +#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ +}) +#else +#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ +}) +#else +#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ +}) +#else +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ +}) +#else +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ +}) +#else +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ +}) +#else +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ +}) +#else +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ +}) +#endif + +#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ +}) +#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ +}) +#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ +}) +#define vst3_p64(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#ifdef 
__LITTLE_ENDIAN__ +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) +#else +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +}) +#else +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ +}) +#else +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ +}) +#else +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ +}) +#endif + +#define vst3_f64(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ +}) +#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ +}) +#else +#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ +}) +#else +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ +}) +#else +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ +}) +#else +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ +}) +#else +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 
9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ +}) +#else +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ +}) +#else +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ +}) +#endif + +#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ +}) +#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ +}) +#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ +}) +#define vst4_p64(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ +}) +#else +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +}) 
+#else +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ +}) +#else +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ +}) +#else +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ +}) +#endif + +#define vst4_f64(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ +}) +#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ +}) +#else +#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ +}) +#else +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ +}) +#else +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ +}) +#else +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ +}) +#else +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ +}) +#else +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ +}) +#else +#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ +}) +#endif + +#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ +}) +#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ +}) +#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ +}) +#define vstrq_p128(__p0, __p1) __extension__ ({ \ + poly128_t __s1 = __p1; \ + __builtin_neon_vstrq_p128(__p0, __s1); \ +}) +__ai __attribute__((target("neon"))) uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} 
+#else +__ai __attribute__((target("neon"))) float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t 
vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = __p0 - vmovl_high_u8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = __p0 - vmovl_high_u32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint64x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = __p0 - vmovl_high_u16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = __p0 - vmovl_high_s8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = __p0 - vmovl_high_s32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = __p0 - vmovl_high_s16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 
2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = 
__builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t 
vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vtstd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vuqaddb_s8(int8_t 
__p0, uint8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) 
uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t 
__p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); 
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; 
+} +#else +__ai __attribute__((target("neon"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = 
__builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 
15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 
20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t 
__ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 
7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + 
return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) 
float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#define vldap1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vldap1_lane_p64(__p0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vldap1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vldap1q_lane_p64(__p0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) +#else +#define vldap1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vldap1q_lane_p64(__p0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vldap1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vldap1q_lane_u64(__p0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vldap1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vldap1q_lane_u64(__p0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vldap1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vldap1q_lane_f64(__p0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) +#else +#define vldap1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vldap1q_lane_f64(__p0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vldap1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vldap1q_lane_s64(__p0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ +}) +#else +#define vldap1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + int64x2_t 
__rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vldap1q_lane_s64(__p0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vldap1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vldap1_lane_u64(__p0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) +#define vldap1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vldap1_lane_f64(__p0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#define vldap1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vldap1_lane_s64(__p0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) +#define vstl1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vstl1_lane_p64(__p0, (int8x8_t)__s1, __p2, 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vstl1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vstl1q_lane_p64(__p0, (int8x16_t)__s1, __p2, 38); \ +}) +#else +#define vstl1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vstl1q_lane_p64(__p0, (int8x16_t)__rev1, __p2, 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vstl1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vstl1q_lane_u64(__p0, (int8x16_t)__s1, __p2, 51); \ +}) +#else +#define vstl1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vstl1q_lane_u64(__p0, (int8x16_t)__rev1, __p2, 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vstl1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vstl1q_lane_f64(__p0, (int8x16_t)__s1, __p2, 42); \ +}) +#else +#define vstl1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vstl1q_lane_f64(__p0, (int8x16_t)__rev1, __p2, 42); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vstl1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vstl1q_lane_s64(__p0, (int8x16_t)__s1, __p2, 35); \ +}) +#else +#define vstl1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vstl1q_lane_s64(__p0, (int8x16_t)__rev1, __p2, 35); \ +}) +#endif + +#define vstl1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vstl1_lane_u64(__p0, (int8x8_t)__s1, __p2, 19); \ +}) +#define vstl1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vstl1_lane_f64(__p0, (int8x8_t)__s1, __p2, 10); \ +}) +#define vstl1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vstl1_lane_s64(__p0, (int8x8_t)__s1, __p2, 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vbcaxq_u8((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vbcaxq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vbcaxq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vbcaxq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vbcaxq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vbcaxq_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vbcaxq_u16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vbcaxq_u16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, 
int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vbcaxq_s8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vbcaxq_s8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vbcaxq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vbcaxq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vbcaxq_s64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vbcaxq_s64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vbcaxq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vbcaxq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("sha3,neon"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_veor3q_u8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_veor3q_u8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_veor3q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_veor3q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_veor3q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_veor3q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_veor3q_u16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_veor3q_u16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_veor3q_s8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_veor3q_s8((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_veor3q_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_veor3q_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_veor3q_s64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_veor3q_s64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_veor3q_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) 
__builtin_neon_veor3q_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrax1q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vrax1q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512hq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512hq_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512h2q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512h2q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vsha512su0q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512su0q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + __ret = (uint64x2_t) 
__builtin_neon_vsha512su1q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); + return __ret; +} +#else +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsha512su1q_u64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vxarq_u64((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ +}) +#else +#define vxarq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vxarq_u64((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm3partw1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4,neon"))) uint32x4_t 
vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt1aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1aq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt1bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt1bq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt2aq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2aq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + 
uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_u32((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 50); \ + __ret; \ +}) +#else +#define vsm3tt2bq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __s2 = __p2; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsm3tt2bq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4eq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm4eq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.1a,neon"))) int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_lane_s32(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \ + int32_t __ret_760; \ + int32_t __s0_760 = __p0_760; \ + int32_t __s1_760 = __p1_760; \ + int32x2_t __s2_760 = __p2_760; \ + __ret_760 = vqrdmlahs_s32(__s0_760, __s1_760, vget_lane_s32(__s2_760, __p3_760)); \ + __ret_760; \ +}) +#else +#define vqrdmlahs_lane_s32(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \ + int32_t __ret_761; \ + int32_t __s0_761 = __p0_761; \ + int32_t __s1_761 = __p1_761; \ + int32x2_t __s2_761 = __p2_761; \ + int32x2_t __rev2_761; __rev2_761 = __builtin_shufflevector(__s2_761, __s2_761, 1, 0); \ + __ret_761 = vqrdmlahs_s32(__s0_761, __s1_761, __noswap_vget_lane_s32(__rev2_761, __p3_761)); \ + __ret_761; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_lane_s16(__p0_762, __p1_762, 
__p2_762, __p3_762) __extension__ ({ \ + int16_t __ret_762; \ + int16_t __s0_762 = __p0_762; \ + int16_t __s1_762 = __p1_762; \ + int16x4_t __s2_762 = __p2_762; \ + __ret_762 = vqrdmlahh_s16(__s0_762, __s1_762, vget_lane_s16(__s2_762, __p3_762)); \ + __ret_762; \ +}) +#else +#define vqrdmlahh_lane_s16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \ + int16_t __ret_763; \ + int16_t __s0_763 = __p0_763; \ + int16_t __s1_763 = __p1_763; \ + int16x4_t __s2_763 = __p2_763; \ + int16x4_t __rev2_763; __rev2_763 = __builtin_shufflevector(__s2_763, __s2_763, 3, 2, 1, 0); \ + __ret_763 = vqrdmlahh_s16(__s0_763, __s1_763, __noswap_vget_lane_s16(__rev2_763, __p3_763)); \ + __ret_763; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahs_laneq_s32(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \ + int32_t __ret_764; \ + int32_t __s0_764 = __p0_764; \ + int32_t __s1_764 = __p1_764; \ + int32x4_t __s2_764 = __p2_764; \ + __ret_764 = vqrdmlahs_s32(__s0_764, __s1_764, vgetq_lane_s32(__s2_764, __p3_764)); \ + __ret_764; \ +}) +#else +#define vqrdmlahs_laneq_s32(__p0_765, __p1_765, __p2_765, __p3_765) __extension__ ({ \ + int32_t __ret_765; \ + int32_t __s0_765 = __p0_765; \ + int32_t __s1_765 = __p1_765; \ + int32x4_t __s2_765 = __p2_765; \ + int32x4_t __rev2_765; __rev2_765 = __builtin_shufflevector(__s2_765, __s2_765, 3, 2, 1, 0); \ + __ret_765 = vqrdmlahs_s32(__s0_765, __s1_765, __noswap_vgetq_lane_s32(__rev2_765, __p3_765)); \ + __ret_765; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahh_laneq_s16(__p0_766, __p1_766, __p2_766, __p3_766) __extension__ ({ \ + int16_t __ret_766; \ + int16_t __s0_766 = __p0_766; \ + int16_t __s1_766 = __p1_766; \ + int16x8_t __s2_766 = __p2_766; \ + __ret_766 = vqrdmlahh_s16(__s0_766, __s1_766, vgetq_lane_s16(__s2_766, __p3_766)); \ + __ret_766; \ +}) +#else +#define vqrdmlahh_laneq_s16(__p0_767, __p1_767, __p2_767, __p3_767) __extension__ ({ \ + int16_t __ret_767; \ + int16_t __s0_767 = __p0_767; \ + int16_t __s1_767 = __p1_767; \ + int16x8_t __s2_767 = __p2_767; \ + int16x8_t __rev2_767; __rev2_767 = __builtin_shufflevector(__s2_767, __s2_767, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_767 = vqrdmlahh_s16(__s0_767, __s1_767, __noswap_vgetq_lane_s16(__rev2_767, __p3_767)); \ + __ret_767; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s32(__p0_768, __p1_768, __p2_768, __p3_768) __extension__ ({ \ + int32x4_t __ret_768; \ + int32x4_t __s0_768 = __p0_768; \ + int32x4_t __s1_768 = __p1_768; \ + int32x4_t __s2_768 = __p2_768; \ + __ret_768 = vqrdmlahq_s32(__s0_768, __s1_768, splatq_laneq_s32(__s2_768, __p3_768)); \ + __ret_768; \ +}) +#else +#define vqrdmlahq_laneq_s32(__p0_769, __p1_769, __p2_769, __p3_769) __extension__ ({ \ + int32x4_t __ret_769; \ + int32x4_t __s0_769 = __p0_769; \ + int32x4_t __s1_769 = __p1_769; \ + int32x4_t __s2_769 = __p2_769; \ + int32x4_t __rev0_769; __rev0_769 = __builtin_shufflevector(__s0_769, __s0_769, 3, 2, 1, 0); \ + int32x4_t __rev1_769; __rev1_769 = __builtin_shufflevector(__s1_769, __s1_769, 3, 2, 1, 0); \ + int32x4_t __rev2_769; __rev2_769 = __builtin_shufflevector(__s2_769, __s2_769, 3, 2, 1, 0); \ + __ret_769 = __noswap_vqrdmlahq_s32(__rev0_769, __rev1_769, __noswap_splatq_laneq_s32(__rev2_769, __p3_769)); \ + __ret_769 = __builtin_shufflevector(__ret_769, __ret_769, 3, 2, 1, 0); \ + __ret_769; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_laneq_s16(__p0_770, __p1_770, __p2_770, __p3_770) __extension__ ({ \ + int16x8_t __ret_770; \ + int16x8_t __s0_770 = __p0_770; \ + 
int16x8_t __s1_770 = __p1_770; \ + int16x8_t __s2_770 = __p2_770; \ + __ret_770 = vqrdmlahq_s16(__s0_770, __s1_770, splatq_laneq_s16(__s2_770, __p3_770)); \ + __ret_770; \ +}) +#else +#define vqrdmlahq_laneq_s16(__p0_771, __p1_771, __p2_771, __p3_771) __extension__ ({ \ + int16x8_t __ret_771; \ + int16x8_t __s0_771 = __p0_771; \ + int16x8_t __s1_771 = __p1_771; \ + int16x8_t __s2_771 = __p2_771; \ + int16x8_t __rev0_771; __rev0_771 = __builtin_shufflevector(__s0_771, __s0_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_771; __rev1_771 = __builtin_shufflevector(__s1_771, __s1_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_771; __rev2_771 = __builtin_shufflevector(__s2_771, __s2_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_771 = __noswap_vqrdmlahq_s16(__rev0_771, __rev1_771, __noswap_splatq_laneq_s16(__rev2_771, __p3_771)); \ + __ret_771 = __builtin_shufflevector(__ret_771, __ret_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_771; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s32(__p0_772, __p1_772, __p2_772, __p3_772) __extension__ ({ \ + int32x2_t __ret_772; \ + int32x2_t __s0_772 = __p0_772; \ + int32x2_t __s1_772 = __p1_772; \ + int32x4_t __s2_772 = __p2_772; \ + __ret_772 = vqrdmlah_s32(__s0_772, __s1_772, splat_laneq_s32(__s2_772, __p3_772)); \ + __ret_772; \ +}) +#else +#define vqrdmlah_laneq_s32(__p0_773, __p1_773, __p2_773, __p3_773) __extension__ ({ \ + int32x2_t __ret_773; \ + int32x2_t __s0_773 = __p0_773; \ + int32x2_t __s1_773 = __p1_773; \ + int32x4_t __s2_773 = __p2_773; \ + int32x2_t __rev0_773; __rev0_773 = __builtin_shufflevector(__s0_773, __s0_773, 1, 0); \ + int32x2_t __rev1_773; __rev1_773 = __builtin_shufflevector(__s1_773, __s1_773, 1, 0); \ + int32x4_t __rev2_773; __rev2_773 = __builtin_shufflevector(__s2_773, __s2_773, 3, 2, 1, 0); \ + __ret_773 = __noswap_vqrdmlah_s32(__rev0_773, __rev1_773, __noswap_splat_laneq_s32(__rev2_773, __p3_773)); \ + __ret_773 = __builtin_shufflevector(__ret_773, __ret_773, 1, 0); \ + __ret_773; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlah_laneq_s16(__p0_774, __p1_774, __p2_774, __p3_774) __extension__ ({ \ + int16x4_t __ret_774; \ + int16x4_t __s0_774 = __p0_774; \ + int16x4_t __s1_774 = __p1_774; \ + int16x8_t __s2_774 = __p2_774; \ + __ret_774 = vqrdmlah_s16(__s0_774, __s1_774, splat_laneq_s16(__s2_774, __p3_774)); \ + __ret_774; \ +}) +#else +#define vqrdmlah_laneq_s16(__p0_775, __p1_775, __p2_775, __p3_775) __extension__ ({ \ + int16x4_t __ret_775; \ + int16x4_t __s0_775 = __p0_775; \ + int16x4_t __s1_775 = __p1_775; \ + int16x8_t __s2_775 = __p2_775; \ + int16x4_t __rev0_775; __rev0_775 = __builtin_shufflevector(__s0_775, __s0_775, 3, 2, 1, 0); \ + int16x4_t __rev1_775; __rev1_775 = __builtin_shufflevector(__s1_775, __s1_775, 3, 2, 1, 0); \ + int16x8_t __rev2_775; __rev2_775 = __builtin_shufflevector(__s2_775, __s2_775, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_775 = __noswap_vqrdmlah_s16(__rev0_775, __rev1_775, __noswap_splat_laneq_s16(__rev2_775, __p3_775)); \ + __ret_775 = __builtin_shufflevector(__ret_775, __ret_775, 3, 2, 1, 0); \ + __ret_775; \ +}) +#endif + +__ai __attribute__((target("v8.1a,neon"))) int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ 
+#define vqrdmlshs_lane_s32(__p0_776, __p1_776, __p2_776, __p3_776) __extension__ ({ \ + int32_t __ret_776; \ + int32_t __s0_776 = __p0_776; \ + int32_t __s1_776 = __p1_776; \ + int32x2_t __s2_776 = __p2_776; \ + __ret_776 = vqrdmlshs_s32(__s0_776, __s1_776, vget_lane_s32(__s2_776, __p3_776)); \ + __ret_776; \ +}) +#else +#define vqrdmlshs_lane_s32(__p0_777, __p1_777, __p2_777, __p3_777) __extension__ ({ \ + int32_t __ret_777; \ + int32_t __s0_777 = __p0_777; \ + int32_t __s1_777 = __p1_777; \ + int32x2_t __s2_777 = __p2_777; \ + int32x2_t __rev2_777; __rev2_777 = __builtin_shufflevector(__s2_777, __s2_777, 1, 0); \ + __ret_777 = vqrdmlshs_s32(__s0_777, __s1_777, __noswap_vget_lane_s32(__rev2_777, __p3_777)); \ + __ret_777; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_lane_s16(__p0_778, __p1_778, __p2_778, __p3_778) __extension__ ({ \ + int16_t __ret_778; \ + int16_t __s0_778 = __p0_778; \ + int16_t __s1_778 = __p1_778; \ + int16x4_t __s2_778 = __p2_778; \ + __ret_778 = vqrdmlshh_s16(__s0_778, __s1_778, vget_lane_s16(__s2_778, __p3_778)); \ + __ret_778; \ +}) +#else +#define vqrdmlshh_lane_s16(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \ + int16_t __ret_779; \ + int16_t __s0_779 = __p0_779; \ + int16_t __s1_779 = __p1_779; \ + int16x4_t __s2_779 = __p2_779; \ + int16x4_t __rev2_779; __rev2_779 = __builtin_shufflevector(__s2_779, __s2_779, 3, 2, 1, 0); \ + __ret_779 = vqrdmlshh_s16(__s0_779, __s1_779, __noswap_vget_lane_s16(__rev2_779, __p3_779)); \ + __ret_779; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshs_laneq_s32(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \ + int32_t __ret_780; \ + int32_t __s0_780 = __p0_780; \ + int32_t __s1_780 = __p1_780; \ + int32x4_t __s2_780 = __p2_780; \ + __ret_780 = vqrdmlshs_s32(__s0_780, __s1_780, vgetq_lane_s32(__s2_780, __p3_780)); \ + __ret_780; \ +}) +#else +#define vqrdmlshs_laneq_s32(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \ + int32_t __ret_781; \ + int32_t __s0_781 = __p0_781; \ + int32_t __s1_781 = __p1_781; \ + int32x4_t __s2_781 = __p2_781; \ + int32x4_t __rev2_781; __rev2_781 = __builtin_shufflevector(__s2_781, __s2_781, 3, 2, 1, 0); \ + __ret_781 = vqrdmlshs_s32(__s0_781, __s1_781, __noswap_vgetq_lane_s32(__rev2_781, __p3_781)); \ + __ret_781; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshh_laneq_s16(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \ + int16_t __ret_782; \ + int16_t __s0_782 = __p0_782; \ + int16_t __s1_782 = __p1_782; \ + int16x8_t __s2_782 = __p2_782; \ + __ret_782 = vqrdmlshh_s16(__s0_782, __s1_782, vgetq_lane_s16(__s2_782, __p3_782)); \ + __ret_782; \ +}) +#else +#define vqrdmlshh_laneq_s16(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \ + int16_t __ret_783; \ + int16_t __s0_783 = __p0_783; \ + int16_t __s1_783 = __p1_783; \ + int16x8_t __s2_783 = __p2_783; \ + int16x8_t __rev2_783; __rev2_783 = __builtin_shufflevector(__s2_783, __s2_783, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_783 = vqrdmlshh_s16(__s0_783, __s1_783, __noswap_vgetq_lane_s16(__rev2_783, __p3_783)); \ + __ret_783; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s32(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \ + int32x4_t __ret_784; \ + int32x4_t __s0_784 = __p0_784; \ + int32x4_t __s1_784 = __p1_784; \ + int32x4_t __s2_784 = __p2_784; \ + __ret_784 = vqrdmlshq_s32(__s0_784, __s1_784, splatq_laneq_s32(__s2_784, __p3_784)); \ + __ret_784; \ +}) +#else +#define vqrdmlshq_laneq_s32(__p0_785, __p1_785, __p2_785, __p3_785) 
__extension__ ({ \ + int32x4_t __ret_785; \ + int32x4_t __s0_785 = __p0_785; \ + int32x4_t __s1_785 = __p1_785; \ + int32x4_t __s2_785 = __p2_785; \ + int32x4_t __rev0_785; __rev0_785 = __builtin_shufflevector(__s0_785, __s0_785, 3, 2, 1, 0); \ + int32x4_t __rev1_785; __rev1_785 = __builtin_shufflevector(__s1_785, __s1_785, 3, 2, 1, 0); \ + int32x4_t __rev2_785; __rev2_785 = __builtin_shufflevector(__s2_785, __s2_785, 3, 2, 1, 0); \ + __ret_785 = __noswap_vqrdmlshq_s32(__rev0_785, __rev1_785, __noswap_splatq_laneq_s32(__rev2_785, __p3_785)); \ + __ret_785 = __builtin_shufflevector(__ret_785, __ret_785, 3, 2, 1, 0); \ + __ret_785; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_laneq_s16(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \ + int16x8_t __ret_786; \ + int16x8_t __s0_786 = __p0_786; \ + int16x8_t __s1_786 = __p1_786; \ + int16x8_t __s2_786 = __p2_786; \ + __ret_786 = vqrdmlshq_s16(__s0_786, __s1_786, splatq_laneq_s16(__s2_786, __p3_786)); \ + __ret_786; \ +}) +#else +#define vqrdmlshq_laneq_s16(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \ + int16x8_t __ret_787; \ + int16x8_t __s0_787 = __p0_787; \ + int16x8_t __s1_787 = __p1_787; \ + int16x8_t __s2_787 = __p2_787; \ + int16x8_t __rev0_787; __rev0_787 = __builtin_shufflevector(__s0_787, __s0_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_787; __rev1_787 = __builtin_shufflevector(__s1_787, __s1_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_787; __rev2_787 = __builtin_shufflevector(__s2_787, __s2_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_787 = __noswap_vqrdmlshq_s16(__rev0_787, __rev1_787, __noswap_splatq_laneq_s16(__rev2_787, __p3_787)); \ + __ret_787 = __builtin_shufflevector(__ret_787, __ret_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_787; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s32(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \ + int32x2_t __ret_788; \ + int32x2_t __s0_788 = __p0_788; \ + int32x2_t __s1_788 = __p1_788; \ + int32x4_t __s2_788 = __p2_788; \ + __ret_788 = vqrdmlsh_s32(__s0_788, __s1_788, splat_laneq_s32(__s2_788, __p3_788)); \ + __ret_788; \ +}) +#else +#define vqrdmlsh_laneq_s32(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \ + int32x2_t __ret_789; \ + int32x2_t __s0_789 = __p0_789; \ + int32x2_t __s1_789 = __p1_789; \ + int32x4_t __s2_789 = __p2_789; \ + int32x2_t __rev0_789; __rev0_789 = __builtin_shufflevector(__s0_789, __s0_789, 1, 0); \ + int32x2_t __rev1_789; __rev1_789 = __builtin_shufflevector(__s1_789, __s1_789, 1, 0); \ + int32x4_t __rev2_789; __rev2_789 = __builtin_shufflevector(__s2_789, __s2_789, 3, 2, 1, 0); \ + __ret_789 = __noswap_vqrdmlsh_s32(__rev0_789, __rev1_789, __noswap_splat_laneq_s32(__rev2_789, __p3_789)); \ + __ret_789 = __builtin_shufflevector(__ret_789, __ret_789, 1, 0); \ + __ret_789; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_laneq_s16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \ + int16x4_t __ret_790; \ + int16x4_t __s0_790 = __p0_790; \ + int16x4_t __s1_790 = __p1_790; \ + int16x8_t __s2_790 = __p2_790; \ + __ret_790 = vqrdmlsh_s16(__s0_790, __s1_790, splat_laneq_s16(__s2_790, __p3_790)); \ + __ret_790; \ +}) +#else +#define vqrdmlsh_laneq_s16(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \ + int16x4_t __ret_791; \ + int16x4_t __s0_791 = __p0_791; \ + int16x4_t __s1_791 = __p1_791; \ + int16x8_t __s2_791 = __p2_791; \ + int16x4_t __rev0_791; __rev0_791 = __builtin_shufflevector(__s0_791, __s0_791, 3, 2, 1, 0); \ + int16x4_t __rev1_791; __rev1_791 = 
__builtin_shufflevector(__s1_791, __s1_791, 3, 2, 1, 0); \ + int16x8_t __rev2_791; __rev2_791 = __builtin_shufflevector(__s2_791, __s2_791, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_791 = __noswap_vqrdmlsh_s16(__rev0_791, __rev1_791, __noswap_splat_laneq_s16(__rev2_791, __p3_791)); \ + __ret_791 = __builtin_shufflevector(__ret_791, __ret_791, 3, 2, 1, 0); \ + __ret_791; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_f64((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_f64((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_lane_f64(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \ + float64x1_t __ret_792; \ + float64x1_t __s0_792 = __p0_792; \ + float64x1_t __s1_792 = __p1_792; \ + float64x1_t __s2_792 = __p2_792; \ +float64x1_t __reint_792 = __s2_792; \ +uint64x2_t 
__reint1_792 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_792, __p3_792), vgetq_lane_u64(*(uint64x2_t *) &__reint_792, __p3_792)}; \ + __ret_792 = vcmla_f64(__s0_792, __s1_792, *(float64x1_t *) &__reint1_792); \ + __ret_792; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_lane_f64(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \ + float64x2_t __ret_793; \ + float64x2_t __s0_793 = __p0_793; \ + float64x2_t __s1_793 = __p1_793; \ + float64x1_t __s2_793 = __p2_793; \ +float64x1_t __reint_793 = __s2_793; \ +uint64x2_t __reint1_793 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_793, __p3_793), vgetq_lane_u64(*(uint64x2_t *) &__reint_793, __p3_793)}; \ + __ret_793 = vcmlaq_f64(__s0_793, __s1_793, *(float64x2_t *) &__reint1_793); \ + __ret_793; \ +}) +#else +#define vcmlaq_lane_f64(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \ + float64x2_t __ret_794; \ + float64x2_t __s0_794 = __p0_794; \ + float64x2_t __s1_794 = __p1_794; \ + float64x1_t __s2_794 = __p2_794; \ + float64x2_t __rev0_794; __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \ + float64x2_t __rev1_794; __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \ +float64x1_t __reint_794 = __s2_794; \ +uint64x2_t __reint1_794 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_794, __p3_794), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_794, __p3_794)}; \ + __ret_794 = __noswap_vcmlaq_f64(__rev0_794, __rev1_794, *(float64x2_t *) &__reint1_794); \ + __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 1, 0); \ + __ret_794; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_laneq_f64(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \ + float64x1_t __ret_795; \ + float64x1_t __s0_795 = __p0_795; \ + float64x1_t __s1_795 = __p1_795; \ + float64x2_t __s2_795 = __p2_795; \ +float64x2_t __reint_795 = __s2_795; \ +uint64x2_t __reint1_795 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_795, __p3_795), vgetq_lane_u64(*(uint64x2_t *) &__reint_795, __p3_795)}; \ + __ret_795 = vcmla_f64(__s0_795, __s1_795, *(float64x1_t *) &__reint1_795); \ + __ret_795; \ +}) +#else +#define vcmla_laneq_f64(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \ + float64x1_t __ret_796; \ + float64x1_t __s0_796 = __p0_796; \ + float64x1_t __s1_796 = __p1_796; \ + float64x2_t __s2_796 = __p2_796; \ + float64x2_t __rev2_796; __rev2_796 = __builtin_shufflevector(__s2_796, __s2_796, 1, 0); \ +float64x2_t __reint_796 = __rev2_796; \ +uint64x2_t __reint1_796 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_796, __p3_796), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_796, __p3_796)}; \ + __ret_796 = vcmla_f64(__s0_796, __s1_796, *(float64x1_t *) &__reint1_796); \ + __ret_796; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_laneq_f64(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \ + float64x2_t __ret_797; \ + float64x2_t __s0_797 = __p0_797; \ + float64x2_t __s1_797 = __p1_797; \ + float64x2_t __s2_797 = __p2_797; \ +float64x2_t __reint_797 = __s2_797; \ +uint64x2_t __reint1_797 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_797, __p3_797), vgetq_lane_u64(*(uint64x2_t *) &__reint_797, __p3_797)}; \ + __ret_797 = vcmlaq_f64(__s0_797, __s1_797, *(float64x2_t *) &__reint1_797); \ + __ret_797; \ +}) +#else +#define vcmlaq_laneq_f64(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \ + float64x2_t __ret_798; \ + float64x2_t __s0_798 = __p0_798; \ + float64x2_t __s1_798 = __p1_798; \ + float64x2_t __s2_798 = 
__p2_798; \ + float64x2_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 1, 0); \ + float64x2_t __rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 1, 0); \ + float64x2_t __rev2_798; __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 1, 0); \ +float64x2_t __reint_798 = __rev2_798; \ +uint64x2_t __reint1_798 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_798, __p3_798), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_798, __p3_798)}; \ + __ret_798 = __noswap_vcmlaq_f64(__rev0_798, __rev1_798, *(float64x2_t *) &__reint1_798); \ + __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 1, 0); \ + __ret_798; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_rot180_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot180_lane_f64(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \ + float64x1_t __ret_799; \ + float64x1_t __s0_799 = __p0_799; \ + float64x1_t __s1_799 = __p1_799; \ + float64x1_t __s2_799 = __p2_799; \ +float64x1_t __reint_799 = __s2_799; \ +uint64x2_t __reint1_799 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_799, __p3_799), vgetq_lane_u64(*(uint64x2_t *) &__reint_799, __p3_799)}; \ + __ret_799 = vcmla_rot180_f64(__s0_799, __s1_799, *(float64x1_t *) &__reint1_799); \ + __ret_799; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_lane_f64(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \ + float64x2_t __ret_800; \ + float64x2_t __s0_800 = __p0_800; \ + float64x2_t __s1_800 = __p1_800; \ + float64x1_t __s2_800 = __p2_800; \ +float64x1_t __reint_800 = __s2_800; \ +uint64x2_t __reint1_800 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_800, __p3_800), vgetq_lane_u64(*(uint64x2_t *) &__reint_800, __p3_800)}; \ + __ret_800 = vcmlaq_rot180_f64(__s0_800, __s1_800, *(float64x2_t *) &__reint1_800); \ + __ret_800; \ +}) +#else +#define vcmlaq_rot180_lane_f64(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \ + float64x2_t __ret_801; \ + float64x2_t __s0_801 = __p0_801; \ + float64x2_t __s1_801 = __p1_801; \ + float64x1_t __s2_801 = __p2_801; \ + float64x2_t __rev0_801; __rev0_801 = __builtin_shufflevector(__s0_801, 
__s0_801, 1, 0); \ + float64x2_t __rev1_801; __rev1_801 = __builtin_shufflevector(__s1_801, __s1_801, 1, 0); \ +float64x1_t __reint_801 = __s2_801; \ +uint64x2_t __reint1_801 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_801, __p3_801), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_801, __p3_801)}; \ + __ret_801 = __noswap_vcmlaq_rot180_f64(__rev0_801, __rev1_801, *(float64x2_t *) &__reint1_801); \ + __ret_801 = __builtin_shufflevector(__ret_801, __ret_801, 1, 0); \ + __ret_801; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot180_laneq_f64(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \ + float64x1_t __ret_802; \ + float64x1_t __s0_802 = __p0_802; \ + float64x1_t __s1_802 = __p1_802; \ + float64x2_t __s2_802 = __p2_802; \ +float64x2_t __reint_802 = __s2_802; \ +uint64x2_t __reint1_802 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_802, __p3_802), vgetq_lane_u64(*(uint64x2_t *) &__reint_802, __p3_802)}; \ + __ret_802 = vcmla_rot180_f64(__s0_802, __s1_802, *(float64x1_t *) &__reint1_802); \ + __ret_802; \ +}) +#else +#define vcmla_rot180_laneq_f64(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \ + float64x1_t __ret_803; \ + float64x1_t __s0_803 = __p0_803; \ + float64x1_t __s1_803 = __p1_803; \ + float64x2_t __s2_803 = __p2_803; \ + float64x2_t __rev2_803; __rev2_803 = __builtin_shufflevector(__s2_803, __s2_803, 1, 0); \ +float64x2_t __reint_803 = __rev2_803; \ +uint64x2_t __reint1_803 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_803, __p3_803), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_803, __p3_803)}; \ + __ret_803 = vcmla_rot180_f64(__s0_803, __s1_803, *(float64x1_t *) &__reint1_803); \ + __ret_803; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot180_laneq_f64(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \ + float64x2_t __ret_804; \ + float64x2_t __s0_804 = __p0_804; \ + float64x2_t __s1_804 = __p1_804; \ + float64x2_t __s2_804 = __p2_804; \ +float64x2_t __reint_804 = __s2_804; \ +uint64x2_t __reint1_804 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_804, __p3_804), vgetq_lane_u64(*(uint64x2_t *) &__reint_804, __p3_804)}; \ + __ret_804 = vcmlaq_rot180_f64(__s0_804, __s1_804, *(float64x2_t *) &__reint1_804); \ + __ret_804; \ +}) +#else +#define vcmlaq_rot180_laneq_f64(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \ + float64x2_t __ret_805; \ + float64x2_t __s0_805 = __p0_805; \ + float64x2_t __s1_805 = __p1_805; \ + float64x2_t __s2_805 = __p2_805; \ + float64x2_t __rev0_805; __rev0_805 = __builtin_shufflevector(__s0_805, __s0_805, 1, 0); \ + float64x2_t __rev1_805; __rev1_805 = __builtin_shufflevector(__s1_805, __s1_805, 1, 0); \ + float64x2_t __rev2_805; __rev2_805 = __builtin_shufflevector(__s2_805, __s2_805, 1, 0); \ +float64x2_t __reint_805 = __rev2_805; \ +uint64x2_t __reint1_805 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_805, __p3_805), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_805, __p3_805)}; \ + __ret_805 = __noswap_vcmlaq_rot180_f64(__rev0_805, __rev1_805, *(float64x2_t *) &__reint1_805); \ + __ret_805 = __builtin_shufflevector(__ret_805, __ret_805, 1, 0); \ + __ret_805; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai 
__attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vcmla_rot270_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot270_lane_f64(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \ + float64x1_t __ret_806; \ + float64x1_t __s0_806 = __p0_806; \ + float64x1_t __s1_806 = __p1_806; \ + float64x1_t __s2_806 = __p2_806; \ +float64x1_t __reint_806 = __s2_806; \ +uint64x2_t __reint1_806 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_806, __p3_806), vgetq_lane_u64(*(uint64x2_t *) &__reint_806, __p3_806)}; \ + __ret_806 = vcmla_rot270_f64(__s0_806, __s1_806, *(float64x1_t *) &__reint1_806); \ + __ret_806; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_lane_f64(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \ + float64x2_t __ret_807; \ + float64x2_t __s0_807 = __p0_807; \ + float64x2_t __s1_807 = __p1_807; \ + float64x1_t __s2_807 = __p2_807; \ +float64x1_t __reint_807 = __s2_807; \ +uint64x2_t __reint1_807 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_807, __p3_807), vgetq_lane_u64(*(uint64x2_t *) &__reint_807, __p3_807)}; \ + __ret_807 = vcmlaq_rot270_f64(__s0_807, __s1_807, *(float64x2_t *) &__reint1_807); \ + __ret_807; \ +}) +#else +#define vcmlaq_rot270_lane_f64(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \ + float64x2_t __ret_808; \ + float64x2_t __s0_808 = __p0_808; \ + float64x2_t __s1_808 = __p1_808; \ + float64x1_t __s2_808 = __p2_808; \ + float64x2_t __rev0_808; __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 1, 0); \ + float64x2_t __rev1_808; __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 1, 0); \ +float64x1_t __reint_808 = __s2_808; \ +uint64x2_t __reint1_808 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_808, __p3_808), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_808, __p3_808)}; \ + __ret_808 = __noswap_vcmlaq_rot270_f64(__rev0_808, __rev1_808, *(float64x2_t *) &__reint1_808); \ + __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 1, 0); \ + __ret_808; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot270_laneq_f64(__p0_809, __p1_809, __p2_809, __p3_809) __extension__ ({ \ + float64x1_t __ret_809; \ + float64x1_t __s0_809 = __p0_809; \ + float64x1_t __s1_809 = __p1_809; \ + float64x2_t __s2_809 = __p2_809; \ +float64x2_t __reint_809 = __s2_809; \ +uint64x2_t __reint1_809 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_809, __p3_809), vgetq_lane_u64(*(uint64x2_t *) &__reint_809, __p3_809)}; \ + __ret_809 = vcmla_rot270_f64(__s0_809, 
__s1_809, *(float64x1_t *) &__reint1_809); \ + __ret_809; \ +}) +#else +#define vcmla_rot270_laneq_f64(__p0_810, __p1_810, __p2_810, __p3_810) __extension__ ({ \ + float64x1_t __ret_810; \ + float64x1_t __s0_810 = __p0_810; \ + float64x1_t __s1_810 = __p1_810; \ + float64x2_t __s2_810 = __p2_810; \ + float64x2_t __rev2_810; __rev2_810 = __builtin_shufflevector(__s2_810, __s2_810, 1, 0); \ +float64x2_t __reint_810 = __rev2_810; \ +uint64x2_t __reint1_810 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_810, __p3_810), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_810, __p3_810)}; \ + __ret_810 = vcmla_rot270_f64(__s0_810, __s1_810, *(float64x1_t *) &__reint1_810); \ + __ret_810; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot270_laneq_f64(__p0_811, __p1_811, __p2_811, __p3_811) __extension__ ({ \ + float64x2_t __ret_811; \ + float64x2_t __s0_811 = __p0_811; \ + float64x2_t __s1_811 = __p1_811; \ + float64x2_t __s2_811 = __p2_811; \ +float64x2_t __reint_811 = __s2_811; \ +uint64x2_t __reint1_811 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_811, __p3_811), vgetq_lane_u64(*(uint64x2_t *) &__reint_811, __p3_811)}; \ + __ret_811 = vcmlaq_rot270_f64(__s0_811, __s1_811, *(float64x2_t *) &__reint1_811); \ + __ret_811; \ +}) +#else +#define vcmlaq_rot270_laneq_f64(__p0_812, __p1_812, __p2_812, __p3_812) __extension__ ({ \ + float64x2_t __ret_812; \ + float64x2_t __s0_812 = __p0_812; \ + float64x2_t __s1_812 = __p1_812; \ + float64x2_t __s2_812 = __p2_812; \ + float64x2_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 1, 0); \ + float64x2_t __rev1_812; __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 1, 0); \ + float64x2_t __rev2_812; __rev2_812 = __builtin_shufflevector(__s2_812, __s2_812, 1, 0); \ +float64x2_t __reint_812 = __rev2_812; \ +uint64x2_t __reint1_812 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_812, __p3_812), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_812, __p3_812)}; \ + __ret_812 = __noswap_vcmlaq_rot270_f64(__rev0_812, __rev1_812, *(float64x2_t *) &__reint1_812); \ + __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \ + __ret_812; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = 
(float64x1_t) __builtin_neon_vcmla_rot90_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vcmla_rot90_lane_f64(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \ + float64x1_t __ret_813; \ + float64x1_t __s0_813 = __p0_813; \ + float64x1_t __s1_813 = __p1_813; \ + float64x1_t __s2_813 = __p2_813; \ +float64x1_t __reint_813 = __s2_813; \ +uint64x2_t __reint1_813 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_813, __p3_813), vgetq_lane_u64(*(uint64x2_t *) &__reint_813, __p3_813)}; \ + __ret_813 = vcmla_rot90_f64(__s0_813, __s1_813, *(float64x1_t *) &__reint1_813); \ + __ret_813; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_lane_f64(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \ + float64x2_t __ret_814; \ + float64x2_t __s0_814 = __p0_814; \ + float64x2_t __s1_814 = __p1_814; \ + float64x1_t __s2_814 = __p2_814; \ +float64x1_t __reint_814 = __s2_814; \ +uint64x2_t __reint1_814 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_814, __p3_814), vgetq_lane_u64(*(uint64x2_t *) &__reint_814, __p3_814)}; \ + __ret_814 = vcmlaq_rot90_f64(__s0_814, __s1_814, *(float64x2_t *) &__reint1_814); \ + __ret_814; \ +}) +#else +#define vcmlaq_rot90_lane_f64(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \ + float64x2_t __ret_815; \ + float64x2_t __s0_815 = __p0_815; \ + float64x2_t __s1_815 = __p1_815; \ + float64x1_t __s2_815 = __p2_815; \ + float64x2_t __rev0_815; __rev0_815 = __builtin_shufflevector(__s0_815, __s0_815, 1, 0); \ + float64x2_t __rev1_815; __rev1_815 = __builtin_shufflevector(__s1_815, __s1_815, 1, 0); \ +float64x1_t __reint_815 = __s2_815; \ +uint64x2_t __reint1_815 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_815, __p3_815), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_815, __p3_815)}; \ + __ret_815 = __noswap_vcmlaq_rot90_f64(__rev0_815, __rev1_815, *(float64x2_t *) &__reint1_815); \ + __ret_815 = __builtin_shufflevector(__ret_815, __ret_815, 1, 0); \ + __ret_815; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmla_rot90_laneq_f64(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \ + float64x1_t __ret_816; \ + float64x1_t __s0_816 = __p0_816; \ + float64x1_t __s1_816 = __p1_816; \ + float64x2_t __s2_816 = __p2_816; \ +float64x2_t __reint_816 = __s2_816; \ +uint64x2_t __reint1_816 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_816, __p3_816), vgetq_lane_u64(*(uint64x2_t *) &__reint_816, __p3_816)}; \ + __ret_816 = vcmla_rot90_f64(__s0_816, __s1_816, *(float64x1_t *) &__reint1_816); \ + __ret_816; \ +}) +#else +#define vcmla_rot90_laneq_f64(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \ + float64x1_t __ret_817; \ + float64x1_t __s0_817 = __p0_817; \ + float64x1_t __s1_817 = __p1_817; \ + float64x2_t __s2_817 = __p2_817; \ + float64x2_t __rev2_817; __rev2_817 = __builtin_shufflevector(__s2_817, __s2_817, 1, 0); \ +float64x2_t __reint_817 = __rev2_817; \ +uint64x2_t __reint1_817 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_817, __p3_817), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_817, __p3_817)}; \ + __ret_817 = vcmla_rot90_f64(__s0_817, __s1_817, *(float64x1_t *) &__reint1_817); \ + __ret_817; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcmlaq_rot90_laneq_f64(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \ + float64x2_t __ret_818; \ + float64x2_t __s0_818 = __p0_818; \ + float64x2_t __s1_818 = __p1_818; \ + float64x2_t __s2_818 = __p2_818; \ +float64x2_t __reint_818 = __s2_818; \ +uint64x2_t __reint1_818 
= (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_818, __p3_818), vgetq_lane_u64(*(uint64x2_t *) &__reint_818, __p3_818)}; \ + __ret_818 = vcmlaq_rot90_f64(__s0_818, __s1_818, *(float64x2_t *) &__reint1_818); \ + __ret_818; \ +}) +#else +#define vcmlaq_rot90_laneq_f64(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \ + float64x2_t __ret_819; \ + float64x2_t __s0_819 = __p0_819; \ + float64x2_t __s1_819 = __p1_819; \ + float64x2_t __s2_819 = __p2_819; \ + float64x2_t __rev0_819; __rev0_819 = __builtin_shufflevector(__s0_819, __s0_819, 1, 0); \ + float64x2_t __rev1_819; __rev1_819 = __builtin_shufflevector(__s1_819, __s1_819, 1, 0); \ + float64x2_t __rev2_819; __rev2_819 = __builtin_shufflevector(__s2_819, __s2_819, 1, 0); \ +float64x2_t __reint_819 = __rev2_819; \ +uint64x2_t __reint1_819 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819)}; \ + __ret_819 = __noswap_vcmlaq_rot90_f64(__rev0_819, __rev1_819, *(float64x2_t *) &__reint1_819); \ + __ret_819 = __builtin_shufflevector(__ret_819, __ret_819, 1, 0); \ + __ret_819; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrnd32xq_f64((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrnd32xq_f64((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd32x_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd32x_f64((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd32zq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) 
__builtin_neon_vrnd32zq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrnd32zq_f64((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrnd32zq_f64((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd32z_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd32z_f64((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrnd64xq_f64((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrnd64xq_f64((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd64x_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd64x_f64((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { + 
float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__p0, 41); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrnd64zq_f64((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrnd64zq_f64((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd64z_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd64z_f64((int8x8_t)__p0, 10); + return __ret; +} +#endif +#ifdef __LITTLE_ENDIAN__ +#define vbfdotq_lane_f32(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \ + float32x4_t __ret_820; \ + float32x4_t __s0_820 = __p0_820; \ + bfloat16x8_t __s1_820 = __p1_820; \ + bfloat16x4_t __s2_820 = __p2_820; \ +bfloat16x4_t __reint_820 = __s2_820; \ +float32x4_t __reint1_820 = splatq_lane_f32(*(float32x2_t *) &__reint_820, __p3_820); \ + __ret_820 = vbfdotq_f32(__s0_820, __s1_820, *(bfloat16x8_t *) &__reint1_820); \ + __ret_820; \ +}) +#else +#define vbfdotq_lane_f32(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \ + float32x4_t __ret_821; \ + float32x4_t __s0_821 = __p0_821; \ + bfloat16x8_t __s1_821 = __p1_821; \ + bfloat16x4_t __s2_821 = __p2_821; \ + float32x4_t __rev0_821; __rev0_821 = __builtin_shufflevector(__s0_821, __s0_821, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_821; __rev1_821 = __builtin_shufflevector(__s1_821, __s1_821, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_821; __rev2_821 = __builtin_shufflevector(__s2_821, __s2_821, 3, 2, 1, 0); \ +bfloat16x4_t __reint_821 = __rev2_821; \ +float32x4_t __reint1_821 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_821, __p3_821); \ + __ret_821 = __noswap_vbfdotq_f32(__rev0_821, __rev1_821, *(bfloat16x8_t *) &__reint1_821); \ + __ret_821 = __builtin_shufflevector(__ret_821, __ret_821, 3, 2, 1, 0); \ + __ret_821; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdot_lane_f32(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \ + float32x2_t __ret_822; \ + float32x2_t __s0_822 = __p0_822; \ + bfloat16x4_t __s1_822 = __p1_822; \ + bfloat16x4_t __s2_822 = __p2_822; \ +bfloat16x4_t __reint_822 = __s2_822; \ +float32x2_t __reint1_822 = splat_lane_f32(*(float32x2_t *) 
&__reint_822, __p3_822); \ + __ret_822 = vbfdot_f32(__s0_822, __s1_822, *(bfloat16x4_t *) &__reint1_822); \ + __ret_822; \ +}) +#else +#define vbfdot_lane_f32(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \ + float32x2_t __ret_823; \ + float32x2_t __s0_823 = __p0_823; \ + bfloat16x4_t __s1_823 = __p1_823; \ + bfloat16x4_t __s2_823 = __p2_823; \ + float32x2_t __rev0_823; __rev0_823 = __builtin_shufflevector(__s0_823, __s0_823, 1, 0); \ + bfloat16x4_t __rev1_823; __rev1_823 = __builtin_shufflevector(__s1_823, __s1_823, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_823; __rev2_823 = __builtin_shufflevector(__s2_823, __s2_823, 3, 2, 1, 0); \ +bfloat16x4_t __reint_823 = __rev2_823; \ +float32x2_t __reint1_823 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_823, __p3_823); \ + __ret_823 = __noswap_vbfdot_f32(__rev0_823, __rev1_823, *(bfloat16x4_t *) &__reint1_823); \ + __ret_823 = __builtin_shufflevector(__ret_823, __ret_823, 1, 0); \ + __ret_823; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdotq_laneq_f32(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \ + float32x4_t __ret_824; \ + float32x4_t __s0_824 = __p0_824; \ + bfloat16x8_t __s1_824 = __p1_824; \ + bfloat16x8_t __s2_824 = __p2_824; \ +bfloat16x8_t __reint_824 = __s2_824; \ +float32x4_t __reint1_824 = splatq_laneq_f32(*(float32x4_t *) &__reint_824, __p3_824); \ + __ret_824 = vbfdotq_f32(__s0_824, __s1_824, *(bfloat16x8_t *) &__reint1_824); \ + __ret_824; \ +}) +#else +#define vbfdotq_laneq_f32(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \ + float32x4_t __ret_825; \ + float32x4_t __s0_825 = __p0_825; \ + bfloat16x8_t __s1_825 = __p1_825; \ + bfloat16x8_t __s2_825 = __p2_825; \ + float32x4_t __rev0_825; __rev0_825 = __builtin_shufflevector(__s0_825, __s0_825, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_825; __rev1_825 = __builtin_shufflevector(__s1_825, __s1_825, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_825; __rev2_825 = __builtin_shufflevector(__s2_825, __s2_825, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_825 = __rev2_825; \ +float32x4_t __reint1_825 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_825, __p3_825); \ + __ret_825 = __noswap_vbfdotq_f32(__rev0_825, __rev1_825, *(bfloat16x8_t *) &__reint1_825); \ + __ret_825 = __builtin_shufflevector(__ret_825, __ret_825, 3, 2, 1, 0); \ + __ret_825; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfdot_laneq_f32(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \ + float32x2_t __ret_826; \ + float32x2_t __s0_826 = __p0_826; \ + bfloat16x4_t __s1_826 = __p1_826; \ + bfloat16x8_t __s2_826 = __p2_826; \ +bfloat16x8_t __reint_826 = __s2_826; \ +float32x2_t __reint1_826 = splat_laneq_f32(*(float32x4_t *) &__reint_826, __p3_826); \ + __ret_826 = vbfdot_f32(__s0_826, __s1_826, *(bfloat16x4_t *) &__reint1_826); \ + __ret_826; \ +}) +#else +#define vbfdot_laneq_f32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \ + float32x2_t __ret_827; \ + float32x2_t __s0_827 = __p0_827; \ + bfloat16x4_t __s1_827 = __p1_827; \ + bfloat16x8_t __s2_827 = __p2_827; \ + float32x2_t __rev0_827; __rev0_827 = __builtin_shufflevector(__s0_827, __s0_827, 1, 0); \ + bfloat16x4_t __rev1_827; __rev1_827 = __builtin_shufflevector(__s1_827, __s1_827, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_827; __rev2_827 = __builtin_shufflevector(__s2_827, __s2_827, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_827 = __rev2_827; \ +float32x2_t __reint1_827 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_827, __p3_827); \ + __ret_827 = __noswap_vbfdot_f32(__rev0_827, 
__rev1_827, *(bfloat16x4_t *) &__reint1_827); \ + __ret_827 = __builtin_shufflevector(__ret_827, __ret_827, 1, 0); \ + __ret_827; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlalbq_lane_f32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \ + float32x4_t __ret_828; \ + float32x4_t __s0_828 = __p0_828; \ + bfloat16x8_t __s1_828 = __p1_828; \ + bfloat16x4_t __s2_828 = __p2_828; \ + __ret_828 = vbfmlalbq_f32(__s0_828, __s1_828, (bfloat16x8_t) {vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828)}); \ + __ret_828; \ +}) +#else +#define vbfmlalbq_lane_f32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \ + float32x4_t __ret_829; \ + float32x4_t __s0_829 = __p0_829; \ + bfloat16x8_t __s1_829 = __p1_829; \ + bfloat16x4_t __s2_829 = __p2_829; \ + float32x4_t __rev0_829; __rev0_829 = __builtin_shufflevector(__s0_829, __s0_829, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_829; __rev1_829 = __builtin_shufflevector(__s1_829, __s1_829, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_829; __rev2_829 = __builtin_shufflevector(__s2_829, __s2_829, 3, 2, 1, 0); \ + __ret_829 = __noswap_vbfmlalbq_f32(__rev0_829, __rev1_829, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829)}); \ + __ret_829 = __builtin_shufflevector(__ret_829, __ret_829, 3, 2, 1, 0); \ + __ret_829; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlalbq_laneq_f32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \ + float32x4_t __ret_830; \ + float32x4_t __s0_830 = __p0_830; \ + bfloat16x8_t __s1_830 = __p1_830; \ + bfloat16x8_t __s2_830 = __p2_830; \ + __ret_830 = vbfmlalbq_f32(__s0_830, __s1_830, (bfloat16x8_t) {vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830)}); \ + __ret_830; \ +}) +#else +#define vbfmlalbq_laneq_f32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \ + float32x4_t __ret_831; \ + float32x4_t __s0_831 = __p0_831; \ + bfloat16x8_t __s1_831 = __p1_831; \ + bfloat16x8_t __s2_831 = __p2_831; \ + float32x4_t __rev0_831; __rev0_831 = __builtin_shufflevector(__s0_831, __s0_831, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_831; __rev1_831 = __builtin_shufflevector(__s1_831, __s1_831, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_831; __rev2_831 = __builtin_shufflevector(__s2_831, __s2_831, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_831 = __noswap_vbfmlalbq_f32(__rev0_831, __rev1_831, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831)}); \ + __ret_831 = 
__builtin_shufflevector(__ret_831, __ret_831, 3, 2, 1, 0); \ + __ret_831; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_lane_f32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \ + float32x4_t __ret_832; \ + float32x4_t __s0_832 = __p0_832; \ + bfloat16x8_t __s1_832 = __p1_832; \ + bfloat16x4_t __s2_832 = __p2_832; \ + __ret_832 = vbfmlaltq_f32(__s0_832, __s1_832, (bfloat16x8_t) {vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832)}); \ + __ret_832; \ +}) +#else +#define vbfmlaltq_lane_f32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \ + float32x4_t __ret_833; \ + float32x4_t __s0_833 = __p0_833; \ + bfloat16x8_t __s1_833 = __p1_833; \ + bfloat16x4_t __s2_833 = __p2_833; \ + float32x4_t __rev0_833; __rev0_833 = __builtin_shufflevector(__s0_833, __s0_833, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_833; __rev1_833 = __builtin_shufflevector(__s1_833, __s1_833, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_833; __rev2_833 = __builtin_shufflevector(__s2_833, __s2_833, 3, 2, 1, 0); \ + __ret_833 = __noswap_vbfmlaltq_f32(__rev0_833, __rev1_833, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833)}); \ + __ret_833 = __builtin_shufflevector(__ret_833, __ret_833, 3, 2, 1, 0); \ + __ret_833; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_laneq_f32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \ + float32x4_t __ret_834; \ + float32x4_t __s0_834 = __p0_834; \ + bfloat16x8_t __s1_834 = __p1_834; \ + bfloat16x8_t __s2_834 = __p2_834; \ + __ret_834 = vbfmlaltq_f32(__s0_834, __s1_834, (bfloat16x8_t) {vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834)}); \ + __ret_834; \ +}) +#else +#define vbfmlaltq_laneq_f32(__p0_835, __p1_835, __p2_835, __p3_835) __extension__ ({ \ + float32x4_t __ret_835; \ + float32x4_t __s0_835 = __p0_835; \ + bfloat16x8_t __s1_835 = __p1_835; \ + bfloat16x8_t __s2_835 = __p2_835; \ + float32x4_t __rev0_835; __rev0_835 = __builtin_shufflevector(__s0_835, __s0_835, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_835; __rev1_835 = __builtin_shufflevector(__s1_835, __s1_835, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_835; __rev2_835 = __builtin_shufflevector(__s2_835, __s2_835, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_835 = __noswap_vbfmlaltq_f32(__rev0_835, __rev1_835, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835)}); \ + __ret_835 = __builtin_shufflevector(__ret_835, __ret_835, 3, 2, 1, 0); \ + 
__ret_835; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_836) { + float32x4_t __ret_836; +bfloat16x4_t __reint_836 = __p0_836; +int32x4_t __reint1_836 = vshll_n_s16(*(int16x4_t *) &__reint_836, 16); + __ret_836 = *(float32x4_t *) &__reint1_836; + return __ret_836; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_837) { + float32x4_t __ret_837; + bfloat16x4_t __rev0_837; __rev0_837 = __builtin_shufflevector(__p0_837, __p0_837, 3, 2, 1, 0); +bfloat16x4_t __reint_837 = __rev0_837; +int32x4_t __reint1_837 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_837, 16); + __ret_837 = *(float32x4_t *) &__reint1_837; + __ret_837 = __builtin_shufflevector(__ret_837, __ret_837, 3, 2, 1, 0); + return __ret_837; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_838) { + float32x4_t __ret_838; +bfloat16x4_t __reint_838 = __p0_838; +int32x4_t __reint1_838 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_838, 16); + __ret_838 = *(float32x4_t *) &__reint1_838; + return __ret_838; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_bf16(vget_high_bf16(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_bf16(vget_low_bf16(__p0)); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_lane_u32(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \ + uint32x4_t __ret_839; \ + uint32x4_t __s0_839 = __p0_839; \ + uint8x16_t __s1_839 = __p1_839; \ + uint8x8_t __s2_839 = __p2_839; \ +uint8x8_t __reint_839 = __s2_839; \ +uint32x4_t __reint1_839 = splatq_lane_u32(*(uint32x2_t *) &__reint_839, __p3_839); \ + __ret_839 = vdotq_u32(__s0_839, __s1_839, *(uint8x16_t *) &__reint1_839); \ + __ret_839; \ +}) +#else +#define vdotq_lane_u32(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \ + uint32x4_t __ret_840; \ + uint32x4_t __s0_840 = __p0_840; \ + uint8x16_t __s1_840 = __p1_840; \ + uint8x8_t __s2_840 = __p2_840; \ + uint32x4_t __rev0_840; __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 3, 2, 1, 0); \ + uint8x16_t __rev1_840; __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_840; __rev2_840 = __builtin_shufflevector(__s2_840, __s2_840, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_840 = __rev2_840; \ +uint32x4_t __reint1_840 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_840, __p3_840); \ + __ret_840 = __noswap_vdotq_u32(__rev0_840, __rev1_840, 
*(uint8x16_t *) &__reint1_840); \ + __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 3, 2, 1, 0); \ + __ret_840; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdotq_lane_s32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \ + int32x4_t __ret_841; \ + int32x4_t __s0_841 = __p0_841; \ + int8x16_t __s1_841 = __p1_841; \ + int8x8_t __s2_841 = __p2_841; \ +int8x8_t __reint_841 = __s2_841; \ +int32x4_t __reint1_841 = splatq_lane_s32(*(int32x2_t *) &__reint_841, __p3_841); \ + __ret_841 = vdotq_s32(__s0_841, __s1_841, *(int8x16_t *) &__reint1_841); \ + __ret_841; \ +}) +#else +#define vdotq_lane_s32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \ + int32x4_t __ret_842; \ + int32x4_t __s0_842 = __p0_842; \ + int8x16_t __s1_842 = __p1_842; \ + int8x8_t __s2_842 = __p2_842; \ + int32x4_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 3, 2, 1, 0); \ + int8x16_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_842; __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_842 = __rev2_842; \ +int32x4_t __reint1_842 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_842, __p3_842); \ + __ret_842 = __noswap_vdotq_s32(__rev0_842, __rev1_842, *(int8x16_t *) &__reint1_842); \ + __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 3, 2, 1, 0); \ + __ret_842; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_lane_u32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ + uint32x2_t __ret_843; \ + uint32x2_t __s0_843 = __p0_843; \ + uint8x8_t __s1_843 = __p1_843; \ + uint8x8_t __s2_843 = __p2_843; \ +uint8x8_t __reint_843 = __s2_843; \ +uint32x2_t __reint1_843 = splat_lane_u32(*(uint32x2_t *) &__reint_843, __p3_843); \ + __ret_843 = vdot_u32(__s0_843, __s1_843, *(uint8x8_t *) &__reint1_843); \ + __ret_843; \ +}) +#else +#define vdot_lane_u32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ + uint32x2_t __ret_844; \ + uint32x2_t __s0_844 = __p0_844; \ + uint8x8_t __s1_844 = __p1_844; \ + uint8x8_t __s2_844 = __p2_844; \ + uint32x2_t __rev0_844; __rev0_844 = __builtin_shufflevector(__s0_844, __s0_844, 1, 0); \ + uint8x8_t __rev1_844; __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_844 = __rev2_844; \ +uint32x2_t __reint1_844 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_844, __p3_844); \ + __ret_844 = __noswap_vdot_u32(__rev0_844, __rev1_844, *(uint8x8_t *) &__reint1_844); \ + __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 1, 0); \ + __ret_844; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdot_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ + int32x2_t __ret_845; \ + int32x2_t __s0_845 = __p0_845; \ + int8x8_t __s1_845 = __p1_845; \ + int8x8_t __s2_845 = __p2_845; \ +int8x8_t __reint_845 = __s2_845; \ +int32x2_t __reint1_845 = splat_lane_s32(*(int32x2_t *) &__reint_845, __p3_845); \ + __ret_845 = vdot_s32(__s0_845, __s1_845, *(int8x8_t *) &__reint1_845); \ + __ret_845; \ +}) +#else +#define vdot_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ + int32x2_t __ret_846; \ + int32x2_t __s0_846 = __p0_846; \ + int8x8_t __s1_846 = __p1_846; \ + int8x8_t __s2_846 = __p2_846; \ + int32x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ + int8x8_t 
__rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_846 = __rev2_846; \ +int32x2_t __reint1_846 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_846, __p3_846); \ + __ret_846 = __noswap_vdot_s32(__rev0_846, __rev1_846, *(int8x8_t *) &__reint1_846); \ + __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ + __ret_846; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f16(__p0_847, __p1_847, __p2_847) __extension__ ({ \ + float16x8_t __ret_847; \ + float16x8_t __s0_847 = __p0_847; \ + float16x4_t __s1_847 = __p1_847; \ + __ret_847 = __s0_847 * splatq_lane_f16(__s1_847, __p2_847); \ + __ret_847; \ +}) +#else +#define vmulq_lane_f16(__p0_848, __p1_848, __p2_848) __extension__ ({ \ + float16x8_t __ret_848; \ + float16x8_t __s0_848 = __p0_848; \ + float16x4_t __s1_848 = __p1_848; \ + float16x8_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_848; __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \ + __ret_848 = __rev0_848 * __noswap_splatq_lane_f16(__rev1_848, __p2_848); \ + __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_848; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f16(__p0_849, __p1_849, __p2_849) __extension__ ({ \ + float16x4_t __ret_849; \ + float16x4_t __s0_849 = __p0_849; \ + float16x4_t __s1_849 = __p1_849; \ + __ret_849 = __s0_849 * splat_lane_f16(__s1_849, __p2_849); \ + __ret_849; \ +}) +#else +#define vmul_lane_f16(__p0_850, __p1_850, __p2_850) __extension__ ({ \ + float16x4_t __ret_850; \ + float16x4_t __s0_850 = __p0_850; \ + float16x4_t __s1_850 = __p1_850; \ + float16x4_t __rev0_850; __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 3, 2, 1, 0); \ + float16x4_t __rev1_850; __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 3, 2, 1, 0); \ + __ret_850 = __rev0_850 * __noswap_splat_lane_f16(__rev1_850, __p2_850); \ + __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 3, 2, 1, 0); \ + __ret_850; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudotq_lane_s32(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \ + int32x4_t __ret_851; \ + int32x4_t __s0_851 = __p0_851; \ + int8x16_t __s1_851 = __p1_851; \ + uint8x8_t __s2_851 = __p2_851; \ +uint8x8_t __reint_851 = __s2_851; \ + __ret_851 = vusdotq_s32(__s0_851, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_851, __p3_851)), __s1_851); \ + __ret_851; \ +}) +#else +#define vsudotq_lane_s32(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \ + int32x4_t __ret_852; \ + int32x4_t __s0_852 = __p0_852; \ + int8x16_t __s1_852 = __p1_852; \ + uint8x8_t __s2_852 = __p2_852; \ + int32x4_t __rev0_852; __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \ + int8x16_t __rev1_852; __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_852; __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_852 = __rev2_852; \ + __ret_852 = __noswap_vusdotq_s32(__rev0_852, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_852, __p3_852)), __rev1_852); \ + __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \ + __ret_852; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudot_lane_s32(__p0_853, 
__p1_853, __p2_853, __p3_853) __extension__ ({ \ + int32x2_t __ret_853; \ + int32x2_t __s0_853 = __p0_853; \ + int8x8_t __s1_853 = __p1_853; \ + uint8x8_t __s2_853 = __p2_853; \ +uint8x8_t __reint_853 = __s2_853; \ + __ret_853 = vusdot_s32(__s0_853, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_853, __p3_853)), __s1_853); \ + __ret_853; \ +}) +#else +#define vsudot_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \ + int32x2_t __ret_854; \ + int32x2_t __s0_854 = __p0_854; \ + int8x8_t __s1_854 = __p1_854; \ + uint8x8_t __s2_854 = __p2_854; \ + int32x2_t __rev0_854; __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \ + int8x8_t __rev1_854; __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_854; __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_854 = __rev2_854; \ + __ret_854 = __noswap_vusdot_s32(__rev0_854, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_854, __p3_854)), __rev1_854); \ + __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \ + __ret_854; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_lane_s32(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \ + int32x4_t __ret_855; \ + int32x4_t __s0_855 = __p0_855; \ + uint8x16_t __s1_855 = __p1_855; \ + int8x8_t __s2_855 = __p2_855; \ +int8x8_t __reint_855 = __s2_855; \ + __ret_855 = vusdotq_s32(__s0_855, __s1_855, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_855, __p3_855))); \ + __ret_855; \ +}) +#else +#define vusdotq_lane_s32(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \ + int32x4_t __ret_856; \ + int32x4_t __s0_856 = __p0_856; \ + uint8x16_t __s1_856 = __p1_856; \ + int8x8_t __s2_856 = __p2_856; \ + int32x4_t __rev0_856; __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \ + uint8x16_t __rev1_856; __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_856; __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_856 = __rev2_856; \ + __ret_856 = __noswap_vusdotq_s32(__rev0_856, __rev1_856, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_856, __p3_856))); \ + __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \ + __ret_856; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \ + int32x2_t __ret_857; \ + int32x2_t __s0_857 = __p0_857; \ + uint8x8_t __s1_857 = __p1_857; \ + int8x8_t __s2_857 = __p2_857; \ +int8x8_t __reint_857 = __s2_857; \ + __ret_857 = vusdot_s32(__s0_857, __s1_857, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_857, __p3_857))); \ + __ret_857; \ +}) +#else +#define vusdot_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \ + int32x2_t __ret_858; \ + int32x2_t __s0_858 = __p0_858; \ + uint8x8_t __s1_858 = __p1_858; \ + int8x8_t __s2_858 = __p2_858; \ + int32x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \ + uint8x8_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_858; __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_858 = __rev2_858; \ + __ret_858 = __noswap_vusdot_s32(__rev0_858, __rev1_858, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_858, __p3_858))); \ + __ret_858 = 
__builtin_shufflevector(__ret_858, __ret_858, 1, 0); \ + __ret_858; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = __p0 + vabdq_u8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vabdq_u32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vabdq_u16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + __ret = __p0 + vabdq_s8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + 
__ret = __p0 + vabdq_s32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vabdq_s16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = __p0 + vabd_u8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + __ret = __p0 + vabd_u32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + __ret = __p0 + vabd_u16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + 
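The vaba*/vabd*/vmlal* definitions above and below all follow the shape that runs through this vendored arm_neon.h: a `__LITTLE_ENDIAN__` definition that calls the NEON builtin directly, and an `#else` twin that reverses vector lanes with `__builtin_shufflevector` on the way in and out, so lane numbering keeps its architectural meaning on big-endian targets. A minimal sketch of that wrapper shape is below; it assumes Clang-style vector extensions, and `my_vadd_u32` is a made-up name for illustration only, not an intrinsic from this header.

/* Hypothetical illustration of the endian-reversal wrapper pattern used by the
 * intrinsics in this header; not part of arm_neon.h itself. */
#include <stdint.h>

typedef uint32_t u32x4 __attribute__((vector_size(16)));  /* 4 x uint32_t lanes */

#ifdef __LITTLE_ENDIAN__
static inline u32x4 my_vadd_u32(u32x4 a, u32x4 b) {
  return a + b;                          /* lanes already in the expected order */
}
#else
static inline u32x4 my_vadd_u32(u32x4 a, u32x4 b) {
  /* Reverse lanes, operate, then reverse the result back, mirroring the
   * __rev / __noswap_ pattern seen in the definitions above. */
  u32x4 ra = __builtin_shufflevector(a, a, 3, 2, 1, 0);
  u32x4 rb = __builtin_shufflevector(b, b, 3, 2, 1, 0);
  u32x4 r  = ra + rb;
  return __builtin_shufflevector(r, r, 3, 2, 1, 0);
}
#endif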
+#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = __p0 + vabd_s8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = __p0 + vabd_s32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = __p0 + vabd_s16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1)))); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1)))); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1)))); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1)))); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1)))); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1)))); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; 
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1)))); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1)))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = vmovl_u8(__p0) + vmovl_u8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = vmovl_u32(__p0) + vmovl_u32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = vmovl_u16(__p0) + vmovl_u16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = vmovl_s8(__p0) + vmovl_s8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = vmovl_s32(__p0) + vmovl_s32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = vmovl_s16(__p0) + vmovl_s16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 + vmovl_u8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 + vmovl_u32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 + vmovl_u16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = __p0 + vmovl_s8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = __p0 + vmovl_s32(__p1); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = __p0 + vmovl_s16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vget_lane_f16(__p0_859, __p1_859) __extension__ ({ \ + float16_t __ret_859; \ + float16x4_t __s0_859 = __p0_859; \ +float16x4_t __reint_859 = __s0_859; \ +int16_t __reint1_859 = vget_lane_s16(*(int16x4_t *) &__reint_859, __p1_859); \ + __ret_859 = *(float16_t *) &__reint1_859; \ + __ret_859; \ +}) +#else +#define vget_lane_f16(__p0_860, __p1_860) __extension__ ({ \ + float16_t __ret_860; \ + float16x4_t __s0_860 = __p0_860; \ + float16x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \ +float16x4_t __reint_860 = __rev0_860; \ +int16_t __reint1_860 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_860, __p1_860); \ + __ret_860 = *(float16_t *) &__reint1_860; \ + __ret_860; \ +}) +#define __noswap_vget_lane_f16(__p0_861, __p1_861) __extension__ ({ \ + float16_t __ret_861; \ + float16x4_t __s0_861 = __p0_861; \ +float16x4_t __reint_861 = __s0_861; \ +int16_t __reint1_861 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_861, __p1_861); \ + __ret_861 = *(float16_t *) &__reint1_861; \ + __ret_861; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_f16(__p0_862, __p1_862) __extension__ ({ \ + float16_t __ret_862; \ + float16x8_t __s0_862 = __p0_862; \ +float16x8_t __reint_862 = __s0_862; \ +int16_t __reint1_862 = vgetq_lane_s16(*(int16x8_t *) &__reint_862, __p1_862); \ + __ret_862 = *(float16_t *) &__reint1_862; \ + __ret_862; \ +}) +#else +#define vgetq_lane_f16(__p0_863, __p1_863) __extension__ ({ \ + float16_t __ret_863; \ + float16x8_t __s0_863 = __p0_863; \ + float16x8_t __rev0_863; __rev0_863 = __builtin_shufflevector(__s0_863, __s0_863, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_863 = __rev0_863; \ +int16_t __reint1_863 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_863, __p1_863); \ + __ret_863 = *(float16_t *) &__reint1_863; \ + __ret_863; \ +}) +#define __noswap_vgetq_lane_f16(__p0_864, __p1_864) __extension__ ({ \ + float16_t __ret_864; \ + float16x8_t __s0_864 = __p0_864; \ +float16x8_t __reint_864 = __s0_864; \ +int16_t __reint1_864 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_864, __p1_864); \ + __ret_864 = *(float16_t *) &__reint1_864; \ + __ret_864; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vmull_u8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + uint16x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __noswap_vmull_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vmull_u32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vmull_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vmull_u16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vmull_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vmull_s8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __noswap_vmull_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmlal_s32(int64x2_t __p0, 
int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + vmull_s32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vmull_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vmull_s16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vmull_s16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_u32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ + uint64x2_t __ret_865; \ + uint64x2_t __s0_865 = __p0_865; \ + uint32x2_t __s1_865 = __p1_865; \ + uint32x2_t __s2_865 = __p2_865; \ + __ret_865 = __s0_865 + vmull_u32(__s1_865, splat_lane_u32(__s2_865, __p3_865)); \ + __ret_865; \ +}) +#else +#define vmlal_lane_u32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ + uint64x2_t __ret_866; \ + uint64x2_t __s0_866 = __p0_866; \ + uint32x2_t __s1_866 = __p1_866; \ + uint32x2_t __s2_866 = __p2_866; \ + uint64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \ + uint32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \ + uint32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \ + __ret_866 = __rev0_866 + __noswap_vmull_u32(__rev1_866, __noswap_splat_lane_u32(__rev2_866, __p3_866)); \ + __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \ + __ret_866; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_u16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ + uint32x4_t __ret_867; \ + uint32x4_t __s0_867 = __p0_867; \ + uint16x4_t __s1_867 = __p1_867; \ + uint16x4_t __s2_867 = __p2_867; \ + __ret_867 = __s0_867 + vmull_u16(__s1_867, splat_lane_u16(__s2_867, __p3_867)); \ + __ret_867; \ +}) +#else +#define vmlal_lane_u16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ + uint32x4_t __ret_868; \ + uint32x4_t __s0_868 = __p0_868; \ + uint16x4_t __s1_868 = __p1_868; \ + uint16x4_t __s2_868 = __p2_868; \ + uint32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ + uint16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \ + 
uint16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \ + __ret_868 = __rev0_868 + __noswap_vmull_u16(__rev1_868, __noswap_splat_lane_u16(__rev2_868, __p3_868)); \ + __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \ + __ret_868; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_s32(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \ + int64x2_t __ret_869; \ + int64x2_t __s0_869 = __p0_869; \ + int32x2_t __s1_869 = __p1_869; \ + int32x2_t __s2_869 = __p2_869; \ + __ret_869 = __s0_869 + vmull_s32(__s1_869, splat_lane_s32(__s2_869, __p3_869)); \ + __ret_869; \ +}) +#else +#define vmlal_lane_s32(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \ + int64x2_t __ret_870; \ + int64x2_t __s0_870 = __p0_870; \ + int32x2_t __s1_870 = __p1_870; \ + int32x2_t __s2_870 = __p2_870; \ + int64x2_t __rev0_870; __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 1, 0); \ + int32x2_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 1, 0); \ + int32x2_t __rev2_870; __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 1, 0); \ + __ret_870 = __rev0_870 + __noswap_vmull_s32(__rev1_870, __noswap_splat_lane_s32(__rev2_870, __p3_870)); \ + __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 1, 0); \ + __ret_870; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlal_lane_s16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \ + int32x4_t __ret_871; \ + int32x4_t __s0_871 = __p0_871; \ + int16x4_t __s1_871 = __p1_871; \ + int16x4_t __s2_871 = __p2_871; \ + __ret_871 = __s0_871 + vmull_s16(__s1_871, splat_lane_s16(__s2_871, __p3_871)); \ + __ret_871; \ +}) +#else +#define vmlal_lane_s16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \ + int32x4_t __ret_872; \ + int32x4_t __s0_872 = __p0_872; \ + int16x4_t __s1_872 = __p1_872; \ + int16x4_t __s2_872 = __p2_872; \ + int32x4_t __rev0_872; __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 3, 2, 1, 0); \ + int16x4_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \ + int16x4_t __rev2_872; __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \ + __ret_872 = __rev0_872 + __noswap_vmull_s16(__rev1_872, __noswap_splat_lane_s16(__rev2_872, __p3_872)); \ + __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 3, 2, 1, 0); \ + __ret_872; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + 
return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - vmull_u8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 - __noswap_vmull_u8(__p1, __p2); + return __ret; +} +#endif + 
+#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 - vmull_u32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 - __noswap_vmull_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - vmull_u16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 - __noswap_vmull_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - vmull_s8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 - __noswap_vmull_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 - vmull_s32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai 
__attribute__((target("neon"))) int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 - __noswap_vmull_s32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - vmull_s16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - __noswap_vmull_s16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_u32(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \ + uint64x2_t __ret_873; \ + uint64x2_t __s0_873 = __p0_873; \ + uint32x2_t __s1_873 = __p1_873; \ + uint32x2_t __s2_873 = __p2_873; \ + __ret_873 = __s0_873 - vmull_u32(__s1_873, splat_lane_u32(__s2_873, __p3_873)); \ + __ret_873; \ +}) +#else +#define vmlsl_lane_u32(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \ + uint64x2_t __ret_874; \ + uint64x2_t __s0_874 = __p0_874; \ + uint32x2_t __s1_874 = __p1_874; \ + uint32x2_t __s2_874 = __p2_874; \ + uint64x2_t __rev0_874; __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 1, 0); \ + uint32x2_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 1, 0); \ + uint32x2_t __rev2_874; __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 1, 0); \ + __ret_874 = __rev0_874 - __noswap_vmull_u32(__rev1_874, __noswap_splat_lane_u32(__rev2_874, __p3_874)); \ + __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 1, 0); \ + __ret_874; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_u16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \ + uint32x4_t __ret_875; \ + uint32x4_t __s0_875 = __p0_875; \ + uint16x4_t __s1_875 = __p1_875; \ + uint16x4_t __s2_875 = __p2_875; \ + __ret_875 = __s0_875 - vmull_u16(__s1_875, splat_lane_u16(__s2_875, __p3_875)); \ + __ret_875; \ +}) +#else +#define vmlsl_lane_u16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \ + uint32x4_t __ret_876; \ + uint32x4_t __s0_876 = __p0_876; \ + uint16x4_t __s1_876 = __p1_876; \ + uint16x4_t __s2_876 = __p2_876; \ + uint32x4_t __rev0_876; __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 3, 2, 1, 0); \ + uint16x4_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \ + uint16x4_t __rev2_876; __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \ + __ret_876 = __rev0_876 - __noswap_vmull_u16(__rev1_876, __noswap_splat_lane_u16(__rev2_876, __p3_876)); \ + __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 3, 2, 1, 0); \ + __ret_876; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_s32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \ + int64x2_t __ret_877; \ + int64x2_t __s0_877 = __p0_877; \ + int32x2_t __s1_877 = __p1_877; \ + int32x2_t __s2_877 = __p2_877; \ + __ret_877 = __s0_877 - vmull_s32(__s1_877, 
splat_lane_s32(__s2_877, __p3_877)); \ + __ret_877; \ +}) +#else +#define vmlsl_lane_s32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \ + int64x2_t __ret_878; \ + int64x2_t __s0_878 = __p0_878; \ + int32x2_t __s1_878 = __p1_878; \ + int32x2_t __s2_878 = __p2_878; \ + int64x2_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 1, 0); \ + int32x2_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 1, 0); \ + int32x2_t __rev2_878; __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 1, 0); \ + __ret_878 = __rev0_878 - __noswap_vmull_s32(__rev1_878, __noswap_splat_lane_s32(__rev2_878, __p3_878)); \ + __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 1, 0); \ + __ret_878; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_lane_s16(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \ + int32x4_t __ret_879; \ + int32x4_t __s0_879 = __p0_879; \ + int16x4_t __s1_879 = __p1_879; \ + int16x4_t __s2_879 = __p2_879; \ + __ret_879 = __s0_879 - vmull_s16(__s1_879, splat_lane_s16(__s2_879, __p3_879)); \ + __ret_879; \ +}) +#else +#define vmlsl_lane_s16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ + int32x4_t __ret_880; \ + int32x4_t __s0_880 = __p0_880; \ + int16x4_t __s1_880 = __p1_880; \ + int16x4_t __s2_880 = __p2_880; \ + int32x4_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \ + int16x4_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \ + int16x4_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 3, 2, 1, 0); \ + __ret_880 = __rev0_880 - __noswap_vmull_s16(__rev1_880, __noswap_splat_lane_s16(__rev2_880, __p3_880)); \ + __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \ + __ret_880; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t 
__p2) { + uint32x4_t __ret; + __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_f16(__p0_881, __p1_881, __p2_881) __extension__ ({ \ + float16x4_t __ret_881; \ + float16_t __s0_881 = __p0_881; \ + float16x4_t __s1_881 = __p1_881; \ +float16_t __reint_881 = __s0_881; \ +float16x4_t __reint1_881 = __s1_881; \ +int16x4_t __reint2_881 = vset_lane_s16(*(int16_t *) &__reint_881, *(int16x4_t *) &__reint1_881, __p2_881); \ + __ret_881 = *(float16x4_t *) &__reint2_881; \ + __ret_881; \ +}) +#else +#define vset_lane_f16(__p0_882, __p1_882, __p2_882) __extension__ ({ \ + float16x4_t __ret_882; \ + float16_t __s0_882 = __p0_882; \ + float16x4_t __s1_882 = __p1_882; \ + float16x4_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 3, 2, 1, 0); \ +float16_t __reint_882 = __s0_882; \ +float16x4_t __reint1_882 = __rev1_882; \ +int16x4_t __reint2_882 = __noswap_vset_lane_s16(*(int16_t *) &__reint_882, *(int16x4_t *) &__reint1_882, __p2_882); \ + __ret_882 = *(float16x4_t *) &__reint2_882; \ + __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \ + __ret_882; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsetq_lane_f16(__p0_883, __p1_883, __p2_883) __extension__ ({ \ + float16x8_t __ret_883; \ + float16_t __s0_883 = __p0_883; \ + float16x8_t __s1_883 = __p1_883; \ +float16_t __reint_883 = __s0_883; \ +float16x8_t __reint1_883 = __s1_883; \ +int16x8_t __reint2_883 = vsetq_lane_s16(*(int16_t *) &__reint_883, *(int16x8_t *) &__reint1_883, __p2_883); \ + __ret_883 = *(float16x8_t *) &__reint2_883; \ + __ret_883; \ +}) +#else +#define vsetq_lane_f16(__p0_884, 
__p1_884, __p2_884) __extension__ ({ \ + float16x8_t __ret_884; \ + float16_t __s0_884 = __p0_884; \ + float16x8_t __s1_884 = __p1_884; \ + float16x8_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16_t __reint_884 = __s0_884; \ +float16x8_t __reint1_884 = __rev1_884; \ +int16x8_t __reint2_884 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_884, *(int16x8_t *) &__reint1_884, __p2_884); \ + __ret_884 = *(float16x8_t *) &__reint2_884; \ + __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_884; \ +}) +#endif + +#if defined(__aarch64__) || defined(__arm64ec__) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes,neon"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ + float32x4_t __ret_885; \ + float32x4_t __s0_885 = __p0_885; \ + float16x8_t __s1_885 = __p1_885; \ + float16x4_t __s2_885 = __p2_885; \ + __ret_885 = vfmlalq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \ + __ret_885; \ +}) +#else +#define vfmlalq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ + float32x4_t __ret_886; \ + float32x4_t __s0_886 = __p0_886; \ + float16x8_t __s1_886 = __p1_886; \ + float16x4_t __s2_886 = __p2_886; \ + float32x4_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \ + float16x8_t __rev1_886; __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_886; __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \ + __ret_886 = __noswap_vfmlalq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \ + __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \ + __ret_886; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \ + float32x2_t __ret_887; \ + float32x2_t __s0_887 = __p0_887; \ + float16x4_t __s1_887 = __p1_887; \ + float16x4_t __s2_887 = __p2_887; \ + __ret_887 = vfmlal_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \ + __ret_887; \ +}) +#else +#define 
vfmlal_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ + float32x2_t __ret_888; \ + float32x2_t __s0_888 = __p0_888; \ + float16x4_t __s1_888 = __p1_888; \ + float16x4_t __s2_888 = __p2_888; \ + float32x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ + float16x4_t __rev1_888; __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \ + float16x4_t __rev2_888; __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \ + __ret_888 = __noswap_vfmlal_high_f16(__rev0_888, __rev1_888, (float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \ + __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ + __ret_888; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ + float32x4_t __ret_889; \ + float32x4_t __s0_889 = __p0_889; \ + float16x8_t __s1_889 = __p1_889; \ + float16x4_t __s2_889 = __p2_889; \ + __ret_889 = vfmlalq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \ + __ret_889; \ +}) +#else +#define vfmlalq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ + float32x4_t __ret_890; \ + float32x4_t __s0_890 = __p0_890; \ + float16x8_t __s1_890 = __p1_890; \ + float16x4_t __s2_890 = __p2_890; \ + float32x4_t __rev0_890; __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \ + float16x8_t __rev1_890; __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_890; __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \ + __ret_890 = __noswap_vfmlalq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \ + __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \ + __ret_890; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \ + float32x2_t __ret_891; \ + float32x2_t __s0_891 = __p0_891; \ + float16x4_t __s1_891 = __p1_891; \ + float16x4_t __s2_891 = __p2_891; \ + __ret_891 = vfmlal_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \ + __ret_891; \ +}) +#else +#define vfmlal_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \ + float32x2_t __ret_892; \ + float32x2_t __s0_892 = __p0_892; \ + float16x4_t __s1_892 = __p1_892; \ + float16x4_t __s2_892 = __p2_892; \ + float32x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ + float16x4_t __rev1_892; __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \ + float16x4_t __rev2_892; __rev2_892 = 
__builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \ + __ret_892 = __noswap_vfmlal_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \ + __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ + __ret_892; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ + float32x4_t __ret_893; \ + float32x4_t __s0_893 = __p0_893; \ + float16x8_t __s1_893 = __p1_893; \ + float16x8_t __s2_893 = __p2_893; \ + __ret_893 = vfmlalq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \ + __ret_893; \ +}) +#else +#define vfmlalq_laneq_high_f16(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \ + float32x4_t __ret_894; \ + float32x4_t __s0_894 = __p0_894; \ + float16x8_t __s1_894 = __p1_894; \ + float16x8_t __s2_894 = __p2_894; \ + float32x4_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \ + float16x8_t __rev1_894; __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_894 = __noswap_vfmlalq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \ + __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \ + __ret_894; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ + float32x2_t __ret_895; \ + float32x2_t __s0_895 = __p0_895; \ + float16x4_t __s1_895 = __p1_895; \ + float16x8_t __s2_895 = __p2_895; \ + __ret_895 = vfmlal_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \ + __ret_895; \ +}) +#else +#define vfmlal_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ + float32x2_t __ret_896; \ + float32x2_t __s0_896 = __p0_896; \ + float16x4_t __s1_896 = __p1_896; \ + float16x8_t __s2_896 = __p2_896; \ + float32x2_t __rev0_896; __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \ + float16x4_t __rev1_896; __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \ + float16x8_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_896 = __noswap_vfmlal_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \ + __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \ + __ret_896; \ +}) 
+#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ + float32x4_t __ret_897; \ + float32x4_t __s0_897 = __p0_897; \ + float16x8_t __s1_897 = __p1_897; \ + float16x8_t __s2_897 = __p2_897; \ + __ret_897 = vfmlalq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \ + __ret_897; \ +}) +#else +#define vfmlalq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ + float32x4_t __ret_898; \ + float32x4_t __s0_898 = __p0_898; \ + float16x8_t __s1_898 = __p1_898; \ + float16x8_t __s2_898 = __p2_898; \ + float32x4_t __rev0_898; __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \ + float16x8_t __rev1_898; __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_898 = __noswap_vfmlalq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \ + __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \ + __ret_898; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlal_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \ + float32x2_t __ret_899; \ + float32x2_t __s0_899 = __p0_899; \ + float16x4_t __s1_899 = __p1_899; \ + float16x8_t __s2_899 = __p2_899; \ + __ret_899 = vfmlal_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \ + __ret_899; \ +}) +#else +#define vfmlal_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \ + float32x2_t __ret_900; \ + float32x2_t __s0_900 = __p0_900; \ + float16x4_t __s1_900 = __p1_900; \ + float16x8_t __s2_900 = __p2_900; \ + float32x2_t __rev0_900; __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \ + float16x4_t __rev1_900; __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \ + float16x8_t __rev2_900; __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_900 = __noswap_vfmlal_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \ + __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \ + __ret_900; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_high_f16(__p0_901, __p1_901, __p2_901, __p3_901) __extension__ ({ \ + float32x4_t __ret_901; \ + float32x4_t __s0_901 = __p0_901; \ + float16x8_t __s1_901 = __p1_901; \ + float16x4_t __s2_901 = __p2_901; \ + __ret_901 = vfmlslq_high_f16(__s0_901, __s1_901, (float16x8_t) {vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, 
__p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901)}); \ + __ret_901; \ +}) +#else +#define vfmlslq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \ + float32x4_t __ret_902; \ + float32x4_t __s0_902 = __p0_902; \ + float16x8_t __s1_902 = __p1_902; \ + float16x4_t __s2_902 = __p2_902; \ + float32x4_t __rev0_902; __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, 3, 2, 1, 0); \ + float16x8_t __rev1_902; __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_902; __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, 3, 2, 1, 0); \ + __ret_902 = __noswap_vfmlslq_high_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902)}); \ + __ret_902 = __builtin_shufflevector(__ret_902, __ret_902, 3, 2, 1, 0); \ + __ret_902; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \ + float32x2_t __ret_903; \ + float32x2_t __s0_903 = __p0_903; \ + float16x4_t __s1_903 = __p1_903; \ + float16x4_t __s2_903 = __p2_903; \ + __ret_903 = vfmlsl_high_f16(__s0_903, __s1_903, (float16x4_t) {vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903)}); \ + __ret_903; \ +}) +#else +#define vfmlsl_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \ + float32x2_t __ret_904; \ + float32x2_t __s0_904 = __p0_904; \ + float16x4_t __s1_904 = __p1_904; \ + float16x4_t __s2_904 = __p2_904; \ + float32x2_t __rev0_904; __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, 1, 0); \ + float16x4_t __rev1_904; __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 3, 2, 1, 0); \ + float16x4_t __rev2_904; __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, 3, 2, 1, 0); \ + __ret_904 = __noswap_vfmlsl_high_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904)}); \ + __ret_904 = __builtin_shufflevector(__ret_904, __ret_904, 1, 0); \ + __ret_904; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_low_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ + float32x4_t __ret_905; \ + float32x4_t __s0_905 = __p0_905; \ + float16x8_t __s1_905 = __p1_905; \ + float16x4_t __s2_905 = __p2_905; \ + __ret_905 = vfmlslq_low_f16(__s0_905, __s1_905, (float16x8_t) {vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905)}); \ + __ret_905; \ +}) +#else +#define vfmlslq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ + float32x4_t __ret_906; \ + float32x4_t __s0_906 = __p0_906; \ + float16x8_t __s1_906 = __p1_906; \ + float16x4_t __s2_906 = __p2_906; \ + float32x4_t __rev0_906; 
__rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \ + float16x8_t __rev1_906; __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_906; __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 3, 2, 1, 0); \ + __ret_906 = __noswap_vfmlslq_low_f16(__rev0_906, __rev1_906, (float16x8_t) {__noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906)}); \ + __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \ + __ret_906; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ + float32x2_t __ret_907; \ + float32x2_t __s0_907 = __p0_907; \ + float16x4_t __s1_907 = __p1_907; \ + float16x4_t __s2_907 = __p2_907; \ + __ret_907 = vfmlsl_low_f16(__s0_907, __s1_907, (float16x4_t) {vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907)}); \ + __ret_907; \ +}) +#else +#define vfmlsl_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ + float32x2_t __ret_908; \ + float32x2_t __s0_908 = __p0_908; \ + float16x4_t __s1_908 = __p1_908; \ + float16x4_t __s2_908 = __p2_908; \ + float32x2_t __rev0_908; __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \ + float16x4_t __rev1_908; __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 3, 2, 1, 0); \ + float16x4_t __rev2_908; __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 3, 2, 1, 0); \ + __ret_908 = __noswap_vfmlsl_low_f16(__rev0_908, __rev1_908, (float16x4_t) {__noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908)}); \ + __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \ + __ret_908; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_high_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ + float32x4_t __ret_909; \ + float32x4_t __s0_909 = __p0_909; \ + float16x8_t __s1_909 = __p1_909; \ + float16x8_t __s2_909 = __p2_909; \ + __ret_909 = vfmlslq_high_f16(__s0_909, __s1_909, (float16x8_t) {vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909)}); \ + __ret_909; \ +}) +#else +#define vfmlslq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ + float32x4_t __ret_910; \ + float32x4_t __s0_910 = __p0_910; \ + float16x8_t __s1_910 = __p1_910; \ + float16x8_t __s2_910 = __p2_910; \ + float32x4_t __rev0_910; __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, 3, 2, 1, 0); \ + float16x8_t __rev1_910; __rev1_910 = __builtin_shufflevector(__s1_910, __s1_910, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_910; __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_910 = __noswap_vfmlslq_high_f16(__rev0_910, __rev1_910, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, 
__p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910)}); \ + __ret_910 = __builtin_shufflevector(__ret_910, __ret_910, 3, 2, 1, 0); \ + __ret_910; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ + float32x2_t __ret_911; \ + float32x2_t __s0_911 = __p0_911; \ + float16x4_t __s1_911 = __p1_911; \ + float16x8_t __s2_911 = __p2_911; \ + __ret_911 = vfmlsl_high_f16(__s0_911, __s1_911, (float16x4_t) {vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911)}); \ + __ret_911; \ +}) +#else +#define vfmlsl_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ + float32x2_t __ret_912; \ + float32x2_t __s0_912 = __p0_912; \ + float16x4_t __s1_912 = __p1_912; \ + float16x8_t __s2_912 = __p2_912; \ + float32x2_t __rev0_912; __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, 1, 0); \ + float16x4_t __rev1_912; __rev1_912 = __builtin_shufflevector(__s1_912, __s1_912, 3, 2, 1, 0); \ + float16x8_t __rev2_912; __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_912 = __noswap_vfmlsl_high_f16(__rev0_912, __rev1_912, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912)}); \ + __ret_912 = __builtin_shufflevector(__ret_912, __ret_912, 1, 0); \ + __ret_912; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_low_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \ + float32x4_t __ret_913; \ + float32x4_t __s0_913 = __p0_913; \ + float16x8_t __s1_913 = __p1_913; \ + float16x8_t __s2_913 = __p2_913; \ + __ret_913 = vfmlslq_low_f16(__s0_913, __s1_913, (float16x8_t) {vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913)}); \ + __ret_913; \ +}) +#else +#define vfmlslq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ + float32x4_t __ret_914; \ + float32x4_t __s0_914 = __p0_914; \ + float16x8_t __s1_914 = __p1_914; \ + float16x8_t __s2_914 = __p2_914; \ + float32x4_t __rev0_914; __rev0_914 = __builtin_shufflevector(__s0_914, __s0_914, 3, 2, 1, 0); \ + float16x8_t __rev1_914; __rev1_914 = __builtin_shufflevector(__s1_914, __s1_914, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_914; __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_914 = __noswap_vfmlslq_low_f16(__rev0_914, __rev1_914, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914)}); \ + __ret_914 = __builtin_shufflevector(__ret_914, __ret_914, 3, 2, 1, 0); \ + __ret_914; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vfmlsl_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ + float32x2_t __ret_915; \ + float32x2_t __s0_915 = __p0_915; \ + float16x4_t __s1_915 = __p1_915; \ + float16x8_t __s2_915 = __p2_915; \ + __ret_915 = vfmlsl_low_f16(__s0_915, __s1_915, (float16x4_t) {vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915)}); \ + __ret_915; \ +}) +#else +#define vfmlsl_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ + float32x2_t __ret_916; \ + float32x2_t __s0_916 = __p0_916; \ + float16x4_t __s1_916 = __p1_916; \ + float16x8_t __s2_916 = __p2_916; \ + float32x2_t __rev0_916; __rev0_916 = __builtin_shufflevector(__s0_916, __s0_916, 1, 0); \ + float16x4_t __rev1_916; __rev1_916 = __builtin_shufflevector(__s1_916, __s1_916, 3, 2, 1, 0); \ + float16x8_t __rev2_916; __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_916 = __noswap_vfmlsl_low_f16(__rev0_916, __rev1_916, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916)}); \ + __ret_916 = __builtin_shufflevector(__ret_916, __ret_916, 1, 0); \ + __ret_916; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_lane_f16(__p0_917, __p1_917, __p2_917) __extension__ ({ \ + float16_t __ret_917; \ + float16_t __s0_917 = __p0_917; \ + float16x4_t __s1_917 = __p1_917; \ + __ret_917 = __s0_917 * vget_lane_f16(__s1_917, __p2_917); \ + __ret_917; \ +}) +#else +#define vmulh_lane_f16(__p0_918, __p1_918, __p2_918) __extension__ ({ \ + float16_t __ret_918; \ + float16_t __s0_918 = __p0_918; \ + float16x4_t __s1_918 = __p1_918; \ + float16x4_t __rev1_918; __rev1_918 = __builtin_shufflevector(__s1_918, __s1_918, 3, 2, 1, 0); \ + __ret_918 = __s0_918 * __noswap_vget_lane_f16(__rev1_918, __p2_918); \ + __ret_918; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_laneq_f16(__p0_919, __p1_919, __p2_919) __extension__ ({ \ + float16_t __ret_919; \ + float16_t __s0_919 = __p0_919; \ + float16x8_t __s1_919 = __p1_919; \ + __ret_919 = __s0_919 * vgetq_lane_f16(__s1_919, __p2_919); \ + __ret_919; \ +}) +#else +#define vmulh_laneq_f16(__p0_920, __p1_920, __p2_920) __extension__ ({ \ + float16_t __ret_920; \ + float16_t __s0_920 = __p0_920; \ + float16x8_t __s1_920 = __p1_920; \ + float16x8_t __rev1_920; __rev1_920 = __builtin_shufflevector(__s1_920, __s1_920, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_920 = __s0_920 * __noswap_vgetq_lane_f16(__rev1_920, __p2_920); \ + __ret_920; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) 
uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = __p0 + vmovl_high_u8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = __p0 + vmovl_high_u32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = __p0 + vmovl_high_u16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = __p0 + vmovl_high_s8(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = __p0 + vmovl_high_s32(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t 
vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = __p0 + vmovl_high_s16(__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_p64(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ + poly64x2_t __ret_921; \ + poly64x2_t __s0_921 = __p0_921; \ + poly64x1_t __s2_921 = __p2_921; \ + __ret_921 = vsetq_lane_p64(vget_lane_p64(__s2_921, __p3_921), __s0_921, __p1_921); \ + __ret_921; \ +}) +#else +#define vcopyq_lane_p64(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ + poly64x2_t __ret_922; \ + poly64x2_t __s0_922 = __p0_922; \ + poly64x1_t __s2_922 = __p2_922; \ + poly64x2_t __rev0_922; __rev0_922 = __builtin_shufflevector(__s0_922, __s0_922, 1, 0); \ + __ret_922 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_922, __p3_922), __rev0_922, __p1_922); \ + __ret_922 = __builtin_shufflevector(__ret_922, __ret_922, 1, 0); \ + __ret_922; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_f64(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ + float64x2_t __ret_923; \ + float64x2_t __s0_923 = __p0_923; \ + float64x1_t __s2_923 = __p2_923; \ + __ret_923 = vsetq_lane_f64(vget_lane_f64(__s2_923, __p3_923), __s0_923, __p1_923); \ + __ret_923; \ +}) +#else +#define vcopyq_lane_f64(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \ + float64x2_t __ret_924; \ + float64x2_t __s0_924 = __p0_924; \ + float64x1_t __s2_924 = __p2_924; \ + float64x2_t __rev0_924; __rev0_924 = __builtin_shufflevector(__s0_924, __s0_924, 1, 0); \ + __ret_924 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_924, __p3_924), __rev0_924, __p1_924); \ + __ret_924 = __builtin_shufflevector(__ret_924, __ret_924, 1, 0); \ + __ret_924; \ +}) +#endif + +#define vcopy_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \ + poly64x1_t __ret_925; \ + poly64x1_t __s0_925 = __p0_925; \ + poly64x1_t __s2_925 = __p2_925; \ + __ret_925 = vset_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \ + __ret_925; \ +}) +#define vcopy_lane_f64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \ + float64x1_t __ret_926; \ + float64x1_t __s0_926 = __p0_926; \ + float64x1_t __s2_926 = __p2_926; \ + __ret_926 = vset_lane_f64(vget_lane_f64(__s2_926, __p3_926), __s0_926, __p1_926); \ + __ret_926; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_p64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \ + poly64x2_t __ret_927; \ + poly64x2_t __s0_927 = __p0_927; \ + poly64x2_t __s2_927 = __p2_927; \ + __ret_927 = vsetq_lane_p64(vgetq_lane_p64(__s2_927, __p3_927), __s0_927, __p1_927); \ + __ret_927; \ +}) +#else +#define vcopyq_laneq_p64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ 
\ + poly64x2_t __ret_928; \ + poly64x2_t __s0_928 = __p0_928; \ + poly64x2_t __s2_928 = __p2_928; \ + poly64x2_t __rev0_928; __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \ + poly64x2_t __rev2_928; __rev2_928 = __builtin_shufflevector(__s2_928, __s2_928, 1, 0); \ + __ret_928 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_928, __p3_928), __rev0_928, __p1_928); \ + __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \ + __ret_928; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_f64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \ + float64x2_t __ret_929; \ + float64x2_t __s0_929 = __p0_929; \ + float64x2_t __s2_929 = __p2_929; \ + __ret_929 = vsetq_lane_f64(vgetq_lane_f64(__s2_929, __p3_929), __s0_929, __p1_929); \ + __ret_929; \ +}) +#else +#define vcopyq_laneq_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \ + float64x2_t __ret_930; \ + float64x2_t __s0_930 = __p0_930; \ + float64x2_t __s2_930 = __p2_930; \ + float64x2_t __rev0_930; __rev0_930 = __builtin_shufflevector(__s0_930, __s0_930, 1, 0); \ + float64x2_t __rev2_930; __rev2_930 = __builtin_shufflevector(__s2_930, __s2_930, 1, 0); \ + __ret_930 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_930, __p3_930), __rev0_930, __p1_930); \ + __ret_930 = __builtin_shufflevector(__ret_930, __ret_930, 1, 0); \ + __ret_930; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \ + poly64x1_t __ret_931; \ + poly64x1_t __s0_931 = __p0_931; \ + poly64x2_t __s2_931 = __p2_931; \ + __ret_931 = vset_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, __p1_931); \ + __ret_931; \ +}) +#else +#define vcopy_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \ + poly64x1_t __ret_932; \ + poly64x1_t __s0_932 = __p0_932; \ + poly64x2_t __s2_932 = __p2_932; \ + poly64x2_t __rev2_932; __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \ + __ret_932 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __s0_932, __p1_932); \ + __ret_932; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \ + float64x1_t __ret_933; \ + float64x1_t __s0_933 = __p0_933; \ + float64x2_t __s2_933 = __p2_933; \ + __ret_933 = vset_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \ + __ret_933; \ +}) +#else +#define vcopy_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \ + float64x1_t __ret_934; \ + float64x1_t __s0_934 = __p0_934; \ + float64x2_t __s2_934 = __p2_934; \ + float64x2_t __rev2_934; __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \ + __ret_934 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __s0_934, __p1_934); \ + __ret_934; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); + __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); 
+ return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlal_n_s16(__rev0, 
__noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#define vmulx_lane_f64(__p0_935, __p1_935, __p2_935) __extension__ ({ \ + float64x1_t __ret_935; \ + float64x1_t __s0_935 = __p0_935; \ + float64x1_t __s1_935 = __p1_935; \ + float64_t __x_935 = vget_lane_f64(__s0_935, 0); \ + float64_t __y_935 = vget_lane_f64(__s1_935, __p2_935); \ + float64_t __z_935 = vmulxd_f64(__x_935, __y_935); \ + __ret_935 = vset_lane_f64(__z_935, __s0_935, __p2_935); \ + __ret_935; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmulx_laneq_f64(__p0_936, __p1_936, __p2_936) __extension__ ({ \ + float64x1_t __ret_936; \ + float64x1_t __s0_936 = __p0_936; \ + float64x2_t __s1_936 = __p1_936; \ + float64_t __x_936 = vget_lane_f64(__s0_936, 0); \ + float64_t __y_936 = vgetq_lane_f64(__s1_936, __p2_936); \ + float64_t __z_936 = vmulxd_f64(__x_936, __y_936); \ + __ret_936 = vset_lane_f64(__z_936, __s0_936, 0); \ + __ret_936; \ +}) +#else +#define vmulx_laneq_f64(__p0_937, __p1_937, __p2_937) __extension__ ({ \ + float64x1_t __ret_937; \ + float64x1_t __s0_937 = __p0_937; \ + float64x2_t __s1_937 = __p1_937; \ + float64x2_t __rev1_937; __rev1_937 = __builtin_shufflevector(__s1_937, __s1_937, 1, 0); \ + float64_t __x_937 = vget_lane_f64(__s0_937, 0); \ + float64_t __y_937 = __noswap_vgetq_lane_f64(__rev1_937, __p2_937); \ + float64_t __z_937 = vmulxd_f64(__x_937, __y_937); \ + __ret_937 = vset_lane_f64(__z_937, __s0_937, 0); \ + __ret_937; \ +}) +#endif + +#endif +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + vabdl_u8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __noswap_vabdl_u8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + 
vabdl_u32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { + uint64x2_t __ret; + __ret = __p0 + __noswap_vabdl_u32(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + vabdl_u16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __noswap_vabdl_u16(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + vabdl_s8(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int16x8_t __ret; + __ret = __p0 + __noswap_vabdl_s8(__p1, __p2); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + vabdl_s32(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = __p0 + __noswap_vabdl_s32(__p1, __p2); + return __ret; +} 
+#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + vabdl_s16(__p1, __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = __p0 + __noswap_vabdl_s16(__p1, __p2); + return __ret; +} +#endif + +#if defined(__aarch64__) || defined(__arm64ec__) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#endif + +#undef __ai + +#endif /* if !defined(__ARM_NEON) */ +#endif /* ifndef __ARM_FP */ diff --git a/third_party/aarch64/clang/arm_neon_sve_bridge.h b/third_party/aarch64/clang/arm_neon_sve_bridge.h new file mode 100644 index 000000000..a9fbdbaf4 --- /dev/null +++ b/third_party/aarch64/clang/arm_neon_sve_bridge.h @@ -0,0 +1,182 @@ +/*===---- arm_neon_sve_bridge.h - ARM NEON SVE Bridge intrinsics -----------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_NEON_SVE_BRIDGE_H +#define __ARM_NEON_SVE_BRIDGE_H + +#include <arm_neon.h> +#include <arm_sve.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* Function attributes */ +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) +#define __aio \ + static __inline__ \ + __attribute__((__always_inline__, __nodebug__, __overloadable__)) + +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8))) +svint8_t svset_neonq(svint8_t, int8x16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16))) +svint16_t svset_neonq(svint16_t, int16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32))) +svint32_t svset_neonq(svint32_t, int32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64))) +svint64_t svset_neonq(svint64_t, int64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8))) +svuint8_t svset_neonq(svuint8_t, uint8x16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16))) +svuint16_t svset_neonq(svuint16_t, uint16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32))) +svuint32_t svset_neonq(svuint32_t, uint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64))) +svuint64_t svset_neonq(svuint64_t, uint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16))) +svfloat16_t svset_neonq(svfloat16_t, float16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32))) +svfloat32_t svset_neonq(svfloat32_t, float32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64))) +svfloat64_t svset_neonq(svfloat64_t, float64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8))) +svint8_t svset_neonq_s8(svint8_t, int8x16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16))) +svint16_t svset_neonq_s16(svint16_t, int16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32))) +svint32_t svset_neonq_s32(svint32_t, int32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64))) +svint64_t svset_neonq_s64(svint64_t, int64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8))) +svuint8_t svset_neonq_u8(svuint8_t, uint8x16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16))) +svuint16_t svset_neonq_u16(svuint16_t, uint16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32))) +svuint32_t svset_neonq_u32(svuint32_t, uint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64))) +svuint64_t svset_neonq_u64(svuint64_t, uint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16))) +svfloat16_t svset_neonq_f16(svfloat16_t, float16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32))) +svfloat32_t svset_neonq_f32(svfloat32_t, float32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64))) +svfloat64_t svset_neonq_f64(svfloat64_t, float64x2_t); + +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8))) +int8x16_t svget_neonq(svint8_t); +__aio
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16))) +int16x8_t svget_neonq(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32))) +int32x4_t svget_neonq(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64))) +int64x2_t svget_neonq(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8))) +uint8x16_t svget_neonq(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16))) +uint16x8_t svget_neonq(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32))) +uint32x4_t svget_neonq(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64))) +uint64x2_t svget_neonq(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16))) +float16x8_t svget_neonq(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32))) +float32x4_t svget_neonq(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64))) +float64x2_t svget_neonq(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8))) +int8x16_t svget_neonq_s8(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16))) +int16x8_t svget_neonq_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32))) +int32x4_t svget_neonq_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64))) +int64x2_t svget_neonq_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8))) +uint8x16_t svget_neonq_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16))) +uint16x8_t svget_neonq_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32))) +uint32x4_t svget_neonq_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64))) +uint64x2_t svget_neonq_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16))) +float16x8_t svget_neonq_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32))) +float32x4_t svget_neonq_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64))) +float64x2_t svget_neonq_f64(svfloat64_t); + +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8))) +svint8_t svdup_neonq(int8x16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16))) +svint16_t svdup_neonq(int16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32))) +svint32_t svdup_neonq(int32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64))) +svint64_t svdup_neonq(int64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8))) +svuint8_t svdup_neonq(uint8x16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16))) +svuint16_t svdup_neonq(uint16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32))) +svuint32_t svdup_neonq(uint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64))) +svuint64_t svdup_neonq(uint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16))) +svfloat16_t svdup_neonq(float16x8_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32))) +svfloat32_t svdup_neonq(float32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64))) +svfloat64_t svdup_neonq(float64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8))) +svint8_t svdup_neonq_s8(int8x16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16))) +svint16_t svdup_neonq_s16(int16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32))) +svint32_t svdup_neonq_s32(int32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64))) +svint64_t svdup_neonq_s64(int64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8))) +svuint8_t svdup_neonq_u8(uint8x16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16))) +svuint16_t svdup_neonq_u16(uint16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32))) +svuint32_t svdup_neonq_u32(uint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64))) +svuint64_t svdup_neonq_u64(uint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16))) +svfloat16_t svdup_neonq_f16(float16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32))) +svfloat32_t svdup_neonq_f32(float32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64))) +svfloat64_t svdup_neonq_f64(float64x2_t); + +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16))) +svbfloat16_t svset_neonq(svbfloat16_t, bfloat16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16))) +svbfloat16_t svset_neonq_bf16(svbfloat16_t, bfloat16x8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16))) +bfloat16x8_t svget_neonq(svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16))) +bfloat16x8_t svget_neonq_bf16(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16))) +svbfloat16_t svdup_neonq(bfloat16x8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16))) +svbfloat16_t svdup_neonq_bf16(bfloat16x8_t); + +#undef __ai +#undef __aio + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif //__ARM_NEON_SVE_BRIDGE_H diff --git a/third_party/aarch64/clang/arm_sme.h b/third_party/aarch64/clang/arm_sme.h new file mode 100644 index 000000000..cbfea38fe --- /dev/null +++ b/third_party/aarch64/clang/arm_sme.h @@ -0,0 +1,2819 @@ +/*===---- arm_sme.h - ARM SME intrinsics ------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_SME_H +#define __ARM_SME_H + +#if !defined(__LITTLE_ENDIAN__) +#error "Big endian is currently not supported for arm_sme.h" +#endif +#include <arm_sve.h> + +#include <stddef.h> + +/* Function attributes */ +#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) + +#define __aio static __inline__ __attribute__((__always_inline__, __nodebug__, __overloadable__)) + +#ifdef __cplusplus +extern "C" { +#endif + +void __arm_za_disable(void) __arm_streaming_compatible; + +__ai bool __arm_has_sme(void) __arm_streaming_compatible { + uint64_t x0, x1; + __builtin_arm_get_sme_state(&x0, &x1); + return x0 & (1ULL << 63); +} + +__ai bool __arm_in_streaming_mode(void) __arm_streaming_compatible { + uint64_t x0, x1; + __builtin_arm_get_sme_state(&x0, &x1); + return x0 & 1; +} + +void *__arm_sc_memcpy(void *dest, const void *src, size_t n) __arm_streaming_compatible; +void *__arm_sc_memmove(void *dest, const void *src, size_t n) __arm_streaming_compatible; +void *__arm_sc_memset(void *s, int c, size_t n) __arm_streaming_compatible; +void *__arm_sc_memchr(void *s, int c, size_t n) __arm_streaming_compatible; + +__ai __attribute__((target("sme"))) void svundef_za(void) __arm_streaming_compatible __arm_out("za") { } + +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m))) +void svaddha_za32_u32_m(uint64_t, svbool_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_s32_m))) +void svaddha_za32_s32_m(uint64_t, svbool_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_u32_m))) +void svaddva_za32_u32_m(uint64_t, svbool_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_s32_m))) +void svaddva_za32_s32_m(uint64_t, svbool_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsb))) +uint64_t svcntsb(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsd))) +uint64_t svcntsd(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsh))) +uint64_t svcntsh(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svcntsw))) +uint64_t svcntsw(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za128))) +void svld1_hor_vnum_za128(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za16))) +void svld1_hor_vnum_za16(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za32))) +void svld1_hor_vnum_za32(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za64))) +void svld1_hor_vnum_za64(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_vnum_za8))) +void svld1_hor_vnum_za8(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za128))) +void svld1_hor_za128(uint64_t, uint32_t, svbool_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za16))) +void svld1_hor_za16(uint64_t, uint32_t, svbool_t, void const *); +__ai
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za32))) +void svld1_hor_za32(uint64_t, uint32_t, svbool_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za64))) +void svld1_hor_za64(uint64_t, uint32_t, svbool_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_hor_za8))) +void svld1_hor_za8(uint64_t, uint32_t, svbool_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za128))) +void svld1_ver_vnum_za128(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za16))) +void svld1_ver_vnum_za16(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za32))) +void svld1_ver_vnum_za32(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za64))) +void svld1_ver_vnum_za64(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_vnum_za8))) +void svld1_ver_vnum_za8(uint64_t, uint32_t, svbool_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za128))) +void svld1_ver_za128(uint64_t, uint32_t, svbool_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za16))) +void svld1_ver_za16(uint64_t, uint32_t, svbool_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za32))) +void svld1_ver_za32(uint64_t, uint32_t, svbool_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za64))) +void svld1_ver_za64(uint64_t, uint32_t, svbool_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svld1_ver_za8))) +void svld1_ver_za8(uint64_t, uint32_t, svbool_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svldr_vnum_za))) +void svldr_vnum_za(uint32_t, void const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svldr_za))) +void svldr_za(uint32_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f16_m))) +void svmopa_za32_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_bf16_m))) +void svmopa_za32_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f32_m))) +void svmopa_za32_f32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_s8_m))) +void svmopa_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_u8_m))) +void svmopa_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f16_m))) +void svmops_za32_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_bf16_m))) +void svmops_za32_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f32_m))) +void svmops_za32_f32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_s8_m))) +void svmops_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_u8_m))) +void svmops_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u8_m))) +svuint8_t svread_hor_za128_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u32_m))) +svuint32_t svread_hor_za128_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u64_m))) +svuint64_t svread_hor_za128_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u16_m))) +svuint16_t svread_hor_za128_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_bf16_m))) +svbfloat16_t svread_hor_za128_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s8_m))) +svint8_t svread_hor_za128_s8_m(svint8_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f64_m))) +svfloat64_t svread_hor_za128_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f32_m))) +svfloat32_t svread_hor_za128_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f16_m))) +svfloat16_t svread_hor_za128_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s32_m))) +svint32_t svread_hor_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m))) +svint64_t svread_hor_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m))) +svint16_t svread_hor_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m))) +svuint16_t svread_hor_za16_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_bf16_m))) +svbfloat16_t svread_hor_za16_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_f16_m))) +svfloat16_t svread_hor_za16_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_s16_m))) +svint16_t svread_hor_za16_s16_m(svint16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_u32_m))) +svuint32_t svread_hor_za32_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_f32_m))) +svfloat32_t svread_hor_za32_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_s32_m))) +svint32_t svread_hor_za32_s32_m(svint32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_u64_m))) +svuint64_t 
svread_hor_za64_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_f64_m))) +svfloat64_t svread_hor_za64_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_s64_m))) +svint64_t svread_hor_za64_s64_m(svint64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m))) +svuint8_t svread_hor_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m))) +svint8_t svread_hor_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m))) +svuint8_t svread_ver_za128_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m))) +svuint32_t svread_ver_za128_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u64_m))) +svuint64_t svread_ver_za128_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u16_m))) +svuint16_t svread_ver_za128_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_bf16_m))) +svbfloat16_t svread_ver_za128_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s8_m))) +svint8_t svread_ver_za128_s8_m(svint8_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f64_m))) +svfloat64_t svread_ver_za128_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f32_m))) +svfloat32_t svread_ver_za128_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f16_m))) +svfloat16_t svread_ver_za128_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s32_m))) +svint32_t svread_ver_za128_s32_m(svint32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m))) +svint64_t svread_ver_za128_s64_m(svint64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m))) +svint16_t svread_ver_za128_s16_m(svint16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m))) +svuint16_t svread_ver_za16_u16_m(svuint16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_m))) +svbfloat16_t svread_ver_za16_bf16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_f16_m))) +svfloat16_t svread_ver_za16_f16_m(svfloat16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_s16_m))) +svint16_t svread_ver_za16_s16_m(svint16_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_u32_m))) +svuint32_t svread_ver_za32_u32_m(svuint32_t, svbool_t, uint64_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_f32_m))) +svfloat32_t svread_ver_za32_f32_m(svfloat32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_s32_m))) +svint32_t svread_ver_za32_s32_m(svint32_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_u64_m))) +svuint64_t svread_ver_za64_u64_m(svuint64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_f64_m))) +svfloat64_t svread_ver_za64_f64_m(svfloat64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_s64_m))) +svint64_t svread_ver_za64_s64_m(svint64_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m))) +svuint8_t svread_ver_za8_u8_m(svuint8_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m))) +svint8_t svread_ver_za8_s8_m(svint8_t, svbool_t, uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za128))) +void svst1_hor_vnum_za128(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za16))) +void svst1_hor_vnum_za16(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za32))) +void svst1_hor_vnum_za32(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za64))) +void svst1_hor_vnum_za64(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_vnum_za8))) +void svst1_hor_vnum_za8(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za128))) +void svst1_hor_za128(uint64_t, uint32_t, svbool_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za16))) +void svst1_hor_za16(uint64_t, uint32_t, svbool_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za32))) +void svst1_hor_za32(uint64_t, uint32_t, svbool_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za64))) +void svst1_hor_za64(uint64_t, uint32_t, svbool_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_hor_za8))) +void svst1_hor_za8(uint64_t, uint32_t, svbool_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za128))) +void svst1_ver_vnum_za128(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za16))) +void svst1_ver_vnum_za16(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za32))) +void svst1_ver_vnum_za32(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za64))) +void svst1_ver_vnum_za64(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_vnum_za8))) +void svst1_ver_vnum_za8(uint64_t, uint32_t, svbool_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za128))) +void svst1_ver_za128(uint64_t, uint32_t, svbool_t, void *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za16))) +void svst1_ver_za16(uint64_t, uint32_t, svbool_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za32))) +void svst1_ver_za32(uint64_t, uint32_t, svbool_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za64))) +void svst1_ver_za64(uint64_t, uint32_t, svbool_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svst1_ver_za8))) +void svst1_ver_za8(uint64_t, uint32_t, svbool_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svstr_vnum_za))) +void svstr_vnum_za(uint32_t, void *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svstr_za))) +void svstr_za(uint32_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za32_s8_m))) +void svsumopa_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za32_s8_m))) +void svsumops_za32_s8_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za32_u8_m))) +void svusmopa_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za32_u8_m))) +void svusmops_za32_u8_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u8_m))) +void svwrite_hor_za128_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u32_m))) +void svwrite_hor_za128_u32_m(uint64_t, uint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u64_m))) +void svwrite_hor_za128_u64_m(uint64_t, uint32_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u16_m))) +void svwrite_hor_za128_u16_m(uint64_t, uint32_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_bf16_m))) +void svwrite_hor_za128_bf16_m(uint64_t, uint32_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s8_m))) +void svwrite_hor_za128_s8_m(uint64_t, uint32_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f64_m))) +void svwrite_hor_za128_f64_m(uint64_t, uint32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f32_m))) +void svwrite_hor_za128_f32_m(uint64_t, uint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f16_m))) +void svwrite_hor_za128_f16_m(uint64_t, uint32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s32_m))) +void svwrite_hor_za128_s32_m(uint64_t, uint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m))) +void svwrite_hor_za128_s64_m(uint64_t, uint32_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m))) +void svwrite_hor_za128_s16_m(uint64_t, uint32_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m))) +void svwrite_hor_za16_u16_m(uint64_t, uint32_t, svbool_t, svuint16_t); +__ai 
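[editor's aside] The declarations so far cover the core single-vector SME intrinsics: ZA slice loads/stores (svld1_*/svst1_*), whole-array save/restore (svldr_za/svstr_za), tile reads/writes (svread_*/svwrite_*), and the outer-product accumulators (svmopa/svmops plus the mixed-sign svsumopa/svusmopa forms). A minimal usage sketch, not part of the header: it assumes an SME-capable clang, uses svptrue_b32()/svld1_f32() from arm_sve.h (which arm_sme.h normally pulls in), and uses svzero_za() declared a little further below; the __arm_streaming/__arm_inout("za") keyword attributes follow the current ACLE spelling and may differ on older toolchains.

    #include <arm_sme.h>

    void f32_outer_product_row0(const float *a, const float *b, float *row0)
        __arm_streaming __arm_inout("za")
    {
      svbool_t pg = svptrue_b32();
      svzero_za();                           /* zero every ZA tile              */
      svfloat32_t va = svld1_f32(pg, a);     /* ordinary SVE vector loads       */
      svfloat32_t vb = svld1_f32(pg, b);
      svmopa_za32_f32_m(0, pg, pg, va, vb);  /* ZA0.S += outer product va x vb  */
      svst1_hor_za32(0, 0, pg, row0);        /* store horizontal slice 0 of ZA0 */
    }

The run of declarations continues below.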
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_m))) +void svwrite_hor_za16_bf16_m(uint64_t, uint32_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_m))) +void svwrite_hor_za16_f16_m(uint64_t, uint32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_m))) +void svwrite_hor_za16_s16_m(uint64_t, uint32_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_m))) +void svwrite_hor_za32_u32_m(uint64_t, uint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_m))) +void svwrite_hor_za32_f32_m(uint64_t, uint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_m))) +void svwrite_hor_za32_s32_m(uint64_t, uint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_m))) +void svwrite_hor_za64_u64_m(uint64_t, uint32_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_m))) +void svwrite_hor_za64_f64_m(uint64_t, uint32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_m))) +void svwrite_hor_za64_s64_m(uint64_t, uint32_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_m))) +void svwrite_hor_za8_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m))) +void svwrite_hor_za8_s8_m(uint64_t, uint32_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m))) +void svwrite_ver_za128_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m))) +void svwrite_ver_za128_u32_m(uint64_t, uint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u64_m))) +void svwrite_ver_za128_u64_m(uint64_t, uint32_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u16_m))) +void svwrite_ver_za128_u16_m(uint64_t, uint32_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_bf16_m))) +void svwrite_ver_za128_bf16_m(uint64_t, uint32_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s8_m))) +void svwrite_ver_za128_s8_m(uint64_t, uint32_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f64_m))) +void svwrite_ver_za128_f64_m(uint64_t, uint32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f32_m))) +void svwrite_ver_za128_f32_m(uint64_t, uint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f16_m))) +void svwrite_ver_za128_f16_m(uint64_t, uint32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s32_m))) +void svwrite_ver_za128_s32_m(uint64_t, uint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m))) +void svwrite_ver_za128_s64_m(uint64_t, uint32_t, svbool_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m))) +void svwrite_ver_za128_s16_m(uint64_t, uint32_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m))) +void svwrite_ver_za16_u16_m(uint64_t, uint32_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_m))) +void svwrite_ver_za16_bf16_m(uint64_t, uint32_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_m))) +void svwrite_ver_za16_f16_m(uint64_t, uint32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_m))) +void svwrite_ver_za16_s16_m(uint64_t, uint32_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_m))) +void svwrite_ver_za32_u32_m(uint64_t, uint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_m))) +void svwrite_ver_za32_f32_m(uint64_t, uint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_m))) +void svwrite_ver_za32_s32_m(uint64_t, uint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_m))) +void svwrite_ver_za64_u64_m(uint64_t, uint32_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_m))) +void svwrite_ver_za64_f64_m(uint64_t, uint32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_m))) +void svwrite_ver_za64_s64_m(uint64_t, uint32_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_m))) +void svwrite_ver_za8_u8_m(uint64_t, uint32_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m))) +void svwrite_ver_za8_s8_m(uint64_t, uint32_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_mask_za))) +void svzero_mask_za(uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za))) +void svzero_za(void); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m))) +void svaddha_za32_m(uint64_t, svbool_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_s32_m))) +void svaddha_za32_m(uint64_t, svbool_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_u32_m))) +void svaddva_za32_m(uint64_t, svbool_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za32_s32_m))) +void svaddva_za32_m(uint64_t, svbool_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f16_m))) +void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_bf16_m))) +void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_f32_m))) +void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_s8_m))) +void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_u8_m))) +void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f16_m))) +void svmops_za32_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_bf16_m))) +void svmops_za32_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_f32_m))) +void svmops_za32_m(uint64_t, svbool_t, svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_s8_m))) +void svmops_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_u8_m))) +void svmops_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u8_m))) +svuint8_t svread_hor_za128_m(svuint8_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u32_m))) +svuint32_t svread_hor_za128_m(svuint32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u64_m))) +svuint64_t svread_hor_za128_m(svuint64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_u16_m))) +svuint16_t svread_hor_za128_m(svuint16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_bf16_m))) +svbfloat16_t svread_hor_za128_m(svbfloat16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s8_m))) +svint8_t svread_hor_za128_m(svint8_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f64_m))) +svfloat64_t svread_hor_za128_m(svfloat64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f32_m))) +svfloat32_t svread_hor_za128_m(svfloat32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_f16_m))) +svfloat16_t svread_hor_za128_m(svfloat16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s32_m))) +svint32_t svread_hor_za128_m(svint32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s64_m))) +svint64_t svread_hor_za128_m(svint64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za128_s16_m))) +svint16_t svread_hor_za128_m(svint16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_m))) +svuint16_t svread_hor_za16_m(svuint16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_bf16_m))) +svbfloat16_t svread_hor_za16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_f16_m))) +svfloat16_t svread_hor_za16_m(svfloat16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_s16_m))) +svint16_t svread_hor_za16_m(svint16_t, svbool_t, uint64_t, uint32_t); +__aio 
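[editor's aside] Note the pattern visible here: each intrinsic is declared twice, once under __ai with an explicitly typed name (svmopa_za32_f32_m, svread_hor_za128_u8_m, ...) and once under __aio with a shorter overloaded name (svmopa_za32_m, svread_hor_za128_m, ...) that is repeated for every element type and is resolved from the argument types. A one-line sketch of the equivalence, reusing pg/va/vb from the previous aside:

    svmopa_za32_f32_m(0, pg, pg, va, vb);  /* explicitly suffixed (__ai) form   */
    svmopa_za32_m(0, pg, pg, va, vb);      /* overloaded (__aio) form, resolves */
                                           /* to the f32 variant via arguments  */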
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_u32_m))) +svuint32_t svread_hor_za32_m(svuint32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_f32_m))) +svfloat32_t svread_hor_za32_m(svfloat32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_s32_m))) +svint32_t svread_hor_za32_m(svint32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_u64_m))) +svuint64_t svread_hor_za64_m(svuint64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_f64_m))) +svfloat64_t svread_hor_za64_m(svfloat64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_s64_m))) +svint64_t svread_hor_za64_m(svint64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_m))) +svuint8_t svread_hor_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_m))) +svint8_t svread_hor_za8_m(svint8_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u8_m))) +svuint8_t svread_ver_za128_m(svuint8_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u32_m))) +svuint32_t svread_ver_za128_m(svuint32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u64_m))) +svuint64_t svread_ver_za128_m(svuint64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_u16_m))) +svuint16_t svread_ver_za128_m(svuint16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_bf16_m))) +svbfloat16_t svread_ver_za128_m(svbfloat16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s8_m))) +svint8_t svread_ver_za128_m(svint8_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f64_m))) +svfloat64_t svread_ver_za128_m(svfloat64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f32_m))) +svfloat32_t svread_ver_za128_m(svfloat32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_f16_m))) +svfloat16_t svread_ver_za128_m(svfloat16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s32_m))) +svint32_t svread_ver_za128_m(svint32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s64_m))) +svint64_t svread_ver_za128_m(svint64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za128_s16_m))) +svint16_t svread_ver_za128_m(svint16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_m))) +svuint16_t svread_ver_za16_m(svuint16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_m))) +svbfloat16_t svread_ver_za16_m(svbfloat16_t, svbool_t, uint64_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_f16_m))) +svfloat16_t svread_ver_za16_m(svfloat16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_s16_m))) +svint16_t svread_ver_za16_m(svint16_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_u32_m))) +svuint32_t svread_ver_za32_m(svuint32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_f32_m))) +svfloat32_t svread_ver_za32_m(svfloat32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_s32_m))) +svint32_t svread_ver_za32_m(svint32_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_u64_m))) +svuint64_t svread_ver_za64_m(svuint64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_f64_m))) +svfloat64_t svread_ver_za64_m(svfloat64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_s64_m))) +svint64_t svread_ver_za64_m(svint64_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_m))) +svuint8_t svread_ver_za8_m(svuint8_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_m))) +svint8_t svread_ver_za8_m(svint8_t, svbool_t, uint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za32_s8_m))) +void svsumopa_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za32_s8_m))) +void svsumops_za32_m(uint64_t, svbool_t, svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za32_u8_m))) +void svusmopa_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za32_u8_m))) +void svusmops_za32_m(uint64_t, svbool_t, svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u8_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u32_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u64_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_u16_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_bf16_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s8_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f64_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f32_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_f16_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s32_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s64_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za128_s16_m))) +void svwrite_hor_za128_m(uint64_t, uint32_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_m))) +void svwrite_hor_za16_m(uint64_t, uint32_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_m))) +void svwrite_hor_za16_m(uint64_t, uint32_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_m))) +void svwrite_hor_za16_m(uint64_t, uint32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_m))) +void svwrite_hor_za16_m(uint64_t, uint32_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_m))) +void svwrite_hor_za32_m(uint64_t, uint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_m))) +void svwrite_hor_za32_m(uint64_t, uint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_m))) +void svwrite_hor_za32_m(uint64_t, uint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_m))) +void svwrite_hor_za64_m(uint64_t, uint32_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_m))) +void svwrite_hor_za64_m(uint64_t, uint32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_m))) +void svwrite_hor_za64_m(uint64_t, uint32_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_m))) +void svwrite_hor_za8_m(uint64_t, uint32_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_m))) +void svwrite_hor_za8_m(uint64_t, uint32_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u8_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u32_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u64_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_u16_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_bf16_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s8_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f64_m))) +void 
svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f32_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_f16_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s32_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s64_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za128_s16_m))) +void svwrite_ver_za128_m(uint64_t, uint32_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_m))) +void svwrite_ver_za16_m(uint64_t, uint32_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_m))) +void svwrite_ver_za16_m(uint64_t, uint32_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_m))) +void svwrite_ver_za16_m(uint64_t, uint32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_m))) +void svwrite_ver_za16_m(uint64_t, uint32_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_m))) +void svwrite_ver_za32_m(uint64_t, uint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_m))) +void svwrite_ver_za32_m(uint64_t, uint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_m))) +void svwrite_ver_za32_m(uint64_t, uint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_m))) +void svwrite_ver_za64_m(uint64_t, uint32_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_m))) +void svwrite_ver_za64_m(uint64_t, uint32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_m))) +void svwrite_ver_za64_m(uint64_t, uint32_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_m))) +void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m))) +void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x2))) +void svmla_single_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x4))) +void svmla_single_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x2))) +void svmla_lane_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x4))) +void svmla_lane_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x2))) +void svmla_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x4))) +void svmla_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x2))) +void svmls_single_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x4))) +void svmls_single_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x2))) +void svmls_lane_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x4))) +void svmls_lane_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x2))) +void svmls_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x4))) +void svmls_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_f16_m))) +void svmopa_za16_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_f16_m))) +void svmops_za16_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x2))) +void svmla_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x4))) +void svmla_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x2))) +void svmla_lane_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x4))) +void svmla_lane_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x2))) +void svmla_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x4))) +void svmla_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x2))) +void svmls_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x4))) +void svmls_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x2))) +void svmls_lane_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x4))) +void svmls_lane_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x2))) +void svmls_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x4))) +void svmls_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_f16_m))) +void svmopa_za16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_f16_m))) +void svmops_za16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x2))) +void svadd_za16_f16_vg1x2(uint32_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x4))) +void svadd_za16_f16_vg1x4(uint32_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x2))) +void svsub_za16_f16_vg1x2(uint32_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x4))) +void svsub_za16_f16_vg1x4(uint32_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x2))) +void svadd_za16_vg1x2(uint32_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x4))) +void svadd_za16_vg1x4(uint32_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x2))) +void svsub_za16_vg1x2(uint32_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x4))) +void svsub_za16_vg1x4(uint32_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_f64_m))) +void svmopa_za64_f64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_f64_m))) +void svmops_za64_f64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_f64_m))) +void svmopa_za64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_f64_m))) +void svmops_za64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_u64_m))) +void svaddha_za64_u64_m(uint64_t, svbool_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_s64_m))) +void svaddha_za64_s64_m(uint64_t, svbool_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_u64_m))) +void svaddva_za64_u64_m(uint64_t, svbool_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_s64_m))) +void svaddva_za64_s64_m(uint64_t, svbool_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_s16_m))) +void svmopa_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_u16_m))) +void svmopa_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_s16_m))) +void svmops_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_u16_m))) +void svmops_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za64_s16_m))) +void svsumopa_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za64_s16_m))) +void svsumops_za64_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za64_u16_m))) +void 
svusmopa_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za64_u16_m))) +void svusmops_za64_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_u64_m))) +void svaddha_za64_m(uint64_t, svbool_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za64_s64_m))) +void svaddha_za64_m(uint64_t, svbool_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_u64_m))) +void svaddva_za64_m(uint64_t, svbool_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddva_za64_s64_m))) +void svaddva_za64_m(uint64_t, svbool_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_s16_m))) +void svmopa_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_u16_m))) +void svmopa_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_s16_m))) +void svmops_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_u16_m))) +void svmops_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumopa_za64_s16_m))) +void svsumopa_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumops_za64_s16_m))) +void svsumops_za64_m(uint64_t, svbool_t, svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmopa_za64_u16_m))) +void svusmopa_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmops_za64_u16_m))) +void svusmops_za64_m(uint64_t, svbool_t, svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_u32_vg1x2))) +void svadd_write_single_za32_u32_vg1x2(uint32_t, svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_s32_vg1x2))) +void svadd_write_single_za32_s32_vg1x2(uint32_t, svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_u32_vg1x4))) +void svadd_write_single_za32_u32_vg1x4(uint32_t, svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_s32_vg1x4))) +void svadd_write_single_za32_s32_vg1x4(uint32_t, svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za32_u32_vg1x2))) +void svadd_write_za32_u32_vg1x2(uint32_t, svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za32_s32_vg1x2))) +void svadd_write_za32_s32_vg1x2(uint32_t, svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za32_u32_vg1x4))) +void svadd_write_za32_u32_vg1x4(uint32_t, svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za32_s32_vg1x4))) +void svadd_write_za32_s32_vg1x4(uint32_t, svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_u32_vg1x2))) +void 
svadd_za32_u32_vg1x2(uint32_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_f32_vg1x2))) +void svadd_za32_f32_vg1x2(uint32_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_s32_vg1x2))) +void svadd_za32_s32_vg1x2(uint32_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_u32_vg1x4))) +void svadd_za32_u32_vg1x4(uint32_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_f32_vg1x4))) +void svadd_za32_f32_vg1x4(uint32_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_s32_vg1x4))) +void svadd_za32_s32_vg1x4(uint32_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svbmopa_za32_u32_m))) +void svbmopa_za32_u32_m(uint64_t, svbool_t, svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svbmopa_za32_s32_m))) +void svbmopa_za32_s32_m(uint64_t, svbool_t, svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svbmops_za32_u32_m))) +void svbmops_za32_u32_m(uint64_t, svbool_t, svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svbmops_za32_s32_m))) +void svbmops_za32_s32_m(uint64_t, svbool_t, svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_bf16_vg1x2))) +void svdot_single_za32_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_f16_vg1x2))) +void svdot_single_za32_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_s8_vg1x2))) +void svdot_single_za32_s8_vg1x2(uint32_t, svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_s16_vg1x2))) +void svdot_single_za32_s16_vg1x2(uint32_t, svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_u8_vg1x2))) +void svdot_single_za32_u8_vg1x2(uint32_t, svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_u16_vg1x2))) +void svdot_single_za32_u16_vg1x2(uint32_t, svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_bf16_vg1x4))) +void svdot_single_za32_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_f16_vg1x4))) +void svdot_single_za32_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_s8_vg1x4))) +void svdot_single_za32_s8_vg1x4(uint32_t, svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_s16_vg1x4))) +void svdot_single_za32_s16_vg1x4(uint32_t, svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_u8_vg1x4))) +void svdot_single_za32_u8_vg1x4(uint32_t, svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_u16_vg1x4))) +void svdot_single_za32_u16_vg1x4(uint32_t, svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_bf16_vg1x2))) +void svdot_lane_za32_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_f16_vg1x2))) +void svdot_lane_za32_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_s8_vg1x2))) +void svdot_lane_za32_s8_vg1x2(uint32_t, svint8x2_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_s16_vg1x2))) +void svdot_lane_za32_s16_vg1x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_u8_vg1x2))) +void svdot_lane_za32_u8_vg1x2(uint32_t, svuint8x2_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_u16_vg1x2))) +void svdot_lane_za32_u16_vg1x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_bf16_vg1x4))) +void svdot_lane_za32_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_f16_vg1x4))) +void svdot_lane_za32_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_s8_vg1x4))) +void svdot_lane_za32_s8_vg1x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_s16_vg1x4))) +void svdot_lane_za32_s16_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_u8_vg1x4))) +void svdot_lane_za32_u8_vg1x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_u16_vg1x4))) +void svdot_lane_za32_u16_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_bf16_vg1x2))) +void svdot_za32_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_f16_vg1x2))) +void svdot_za32_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_s8_vg1x2))) +void svdot_za32_s8_vg1x2(uint32_t, svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_s16_vg1x2))) +void svdot_za32_s16_vg1x2(uint32_t, svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_u8_vg1x2))) +void svdot_za32_u8_vg1x2(uint32_t, svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_u16_vg1x2))) +void svdot_za32_u16_vg1x2(uint32_t, svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_bf16_vg1x4))) +void svdot_za32_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_f16_vg1x4))) +void svdot_za32_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_s8_vg1x4))) +void svdot_za32_s8_vg1x4(uint32_t, svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_s16_vg1x4))) +void svdot_za32_s16_vg1x4(uint32_t, svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_u8_vg1x4))) +void svdot_za32_u8_vg1x4(uint32_t, svuint8x4_t, svuint8x4_t); +__ai 
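[editor's aside] The svadd_write_*/svdot_* declarations above are the SME2 multi-vector forms: the vg1x2/vg1x4 suffix means the operation takes a tuple of two or four Z vectors (svfloat16x2_t, svint8x4_t, ...) and updates that many ZA vector groups starting at a slice index. A minimal sketch, not part of the header, assuming the same toolchain caveats as above; svcreate2_f16() is the SVE tuple constructor from arm_sve.h:

    void f16_dot_pair(uint32_t slice, svfloat16_t a0, svfloat16_t a1,
                      svfloat16_t b0, svfloat16_t b1)
        __arm_streaming __arm_inout("za")
    {
      svfloat16x2_t va = svcreate2_f16(a0, a1);  /* pack {a0, a1}             */
      svfloat16x2_t vb = svcreate2_f16(b0, b1);
      svdot_za32_f16_vg1x2(slice, va, vb);       /* widened f16 dot products  */
                                                 /* accumulated into ZA.S     */
    }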
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_u16_vg1x4))) +void svdot_za32_u16_vg1x4(uint32_t, svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svldr_zt))) +void svldr_zt(uint64_t, void const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u8))) +svuint8_t svluti2_lane_zt_u8(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u32))) +svuint32_t svluti2_lane_zt_u32(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u16))) +svuint16_t svluti2_lane_zt_u16(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_bf16))) +svbfloat16_t svluti2_lane_zt_bf16(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s8))) +svint8_t svluti2_lane_zt_s8(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f32))) +svfloat32_t svluti2_lane_zt_f32(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f16))) +svfloat16_t svluti2_lane_zt_f16(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s32))) +svint32_t svluti2_lane_zt_s32(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s16))) +svint16_t svluti2_lane_zt_s16(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u8_x2))) +svuint8x2_t svluti2_lane_zt_u8_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u32_x2))) +svuint32x2_t svluti2_lane_zt_u32_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u16_x2))) +svuint16x2_t svluti2_lane_zt_u16_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_bf16_x2))) +svbfloat16x2_t svluti2_lane_zt_bf16_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s8_x2))) +svint8x2_t svluti2_lane_zt_s8_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f32_x2))) +svfloat32x2_t svluti2_lane_zt_f32_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f16_x2))) +svfloat16x2_t svluti2_lane_zt_f16_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s32_x2))) +svint32x2_t svluti2_lane_zt_s32_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s16_x2))) +svint16x2_t svluti2_lane_zt_s16_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u8_x4))) +svuint8x4_t svluti2_lane_zt_u8_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u32_x4))) +svuint32x4_t svluti2_lane_zt_u32_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_u16_x4))) +svuint16x4_t svluti2_lane_zt_u16_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_bf16_x4))) 
+svbfloat16x4_t svluti2_lane_zt_bf16_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s8_x4))) +svint8x4_t svluti2_lane_zt_s8_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f32_x4))) +svfloat32x4_t svluti2_lane_zt_f32_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_f16_x4))) +svfloat16x4_t svluti2_lane_zt_f16_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s32_x4))) +svint32x4_t svluti2_lane_zt_s32_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti2_lane_zt_s16_x4))) +svint16x4_t svluti2_lane_zt_s16_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u8))) +svuint8_t svluti4_lane_zt_u8(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u32))) +svuint32_t svluti4_lane_zt_u32(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u16))) +svuint16_t svluti4_lane_zt_u16(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_bf16))) +svbfloat16_t svluti4_lane_zt_bf16(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s8))) +svint8_t svluti4_lane_zt_s8(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f32))) +svfloat32_t svluti4_lane_zt_f32(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f16))) +svfloat16_t svluti4_lane_zt_f16(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s32))) +svint32_t svluti4_lane_zt_s32(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s16))) +svint16_t svluti4_lane_zt_s16(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u8_x2))) +svuint8x2_t svluti4_lane_zt_u8_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u32_x2))) +svuint32x2_t svluti4_lane_zt_u32_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u16_x2))) +svuint16x2_t svluti4_lane_zt_u16_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_bf16_x2))) +svbfloat16x2_t svluti4_lane_zt_bf16_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s8_x2))) +svint8x2_t svluti4_lane_zt_s8_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f32_x2))) +svfloat32x2_t svluti4_lane_zt_f32_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f16_x2))) +svfloat16x2_t svluti4_lane_zt_f16_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s32_x2))) +svint32x2_t svluti4_lane_zt_s32_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s16_x2))) +svint16x2_t 
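[editor's aside] svldr_zt/svstr_zt and the svluti2_*/svluti4_* declarations above expose the SME2 ZT0 lookup table: ZT0 is filled from memory and then indexed with 2-bit or 4-bit indices held in an SVE vector, producing one, two, or four result vectors. An illustrative sketch only, not part of the header; the "zt0" state string in __arm_inout() follows the current ACLE draft and may differ between compiler versions:

    svuint8_t lut2_expand(const void *table, svuint8_t idx)
        __arm_streaming __arm_inout("zt0")
    {
      svldr_zt(0, table);                    /* load the ZT0 table from memory */
      return svluti2_lane_zt_u8(0, idx, 0);  /* 2-bit lookup, result lane 0    */
    }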
svluti4_lane_zt_s16_x2(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u32_x4))) +svuint32x4_t svluti4_lane_zt_u32_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_u16_x4))) +svuint16x4_t svluti4_lane_zt_u16_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_bf16_x4))) +svbfloat16x4_t svluti4_lane_zt_bf16_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f32_x4))) +svfloat32x4_t svluti4_lane_zt_f32_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_f16_x4))) +svfloat16x4_t svluti4_lane_zt_f16_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s32_x4))) +svint32x4_t svluti4_lane_zt_s32_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svluti4_lane_zt_s16_x4))) +svint16x4_t svluti4_lane_zt_s16_x4(uint64_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_f32_vg1x2))) +void svmla_single_za32_f32_vg1x2(uint32_t, svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_f32_vg1x4))) +void svmla_single_za32_f32_vg1x4(uint32_t, svfloat32x4_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_bf16_vg2x2))) +void svmla_single_za32_bf16_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_f16_vg2x2))) +void svmla_single_za32_f16_vg2x2(uint32_t, svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_s16_vg2x2))) +void svmla_single_za32_s16_vg2x2(uint32_t, svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_u16_vg2x2))) +void svmla_single_za32_u16_vg2x2(uint32_t, svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_bf16_vg2x4))) +void svmla_single_za32_bf16_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_f16_vg2x4))) +void svmla_single_za32_f16_vg2x4(uint32_t, svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_s16_vg2x4))) +void svmla_single_za32_s16_vg2x4(uint32_t, svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_u16_vg2x4))) +void svmla_single_za32_u16_vg2x4(uint32_t, svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_s8_vg4x2))) +void svmla_single_za32_s8_vg4x2(uint32_t, svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_u8_vg4x2))) +void svmla_single_za32_u8_vg4x2(uint32_t, svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_s8_vg4x4))) +void svmla_single_za32_s8_vg4x4(uint32_t, svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_u8_vg4x4))) +void svmla_single_za32_u8_vg4x4(uint32_t, svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f32_vg1x2))) +void 
svmla_lane_za32_f32_vg1x2(uint32_t, svfloat32x2_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f32_vg1x4))) +void svmla_lane_za32_f32_vg1x4(uint32_t, svfloat32x4_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_bf16_vg2x1))) +void svmla_lane_za32_bf16_vg2x1(uint32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f16_vg2x1))) +void svmla_lane_za32_f16_vg2x1(uint32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s16_vg2x1))) +void svmla_lane_za32_s16_vg2x1(uint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u16_vg2x1))) +void svmla_lane_za32_u16_vg2x1(uint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_bf16_vg2x2))) +void svmla_lane_za32_bf16_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f16_vg2x2))) +void svmla_lane_za32_f16_vg2x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s16_vg2x2))) +void svmla_lane_za32_s16_vg2x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u16_vg2x2))) +void svmla_lane_za32_u16_vg2x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_bf16_vg2x4))) +void svmla_lane_za32_bf16_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f16_vg2x4))) +void svmla_lane_za32_f16_vg2x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s16_vg2x4))) +void svmla_lane_za32_s16_vg2x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u16_vg2x4))) +void svmla_lane_za32_u16_vg2x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s8_vg4x1))) +void svmla_lane_za32_s8_vg4x1(uint32_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u8_vg4x1))) +void svmla_lane_za32_u8_vg4x1(uint32_t, svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s8_vg4x2))) +void svmla_lane_za32_s8_vg4x2(uint32_t, svint8x2_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u8_vg4x2))) +void svmla_lane_za32_u8_vg4x2(uint32_t, svuint8x2_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s8_vg4x4))) +void svmla_lane_za32_s8_vg4x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u8_vg4x4))) +void svmla_lane_za32_u8_vg4x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f32_vg1x2))) +void svmla_za32_f32_vg1x2(uint32_t, svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f32_vg1x4))) +void svmla_za32_f32_vg1x4(uint32_t, svfloat32x4_t, 
svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_bf16_vg2x1))) +void svmla_za32_bf16_vg2x1(uint32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f16_vg2x1))) +void svmla_za32_f16_vg2x1(uint32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s16_vg2x1))) +void svmla_za32_s16_vg2x1(uint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u16_vg2x1))) +void svmla_za32_u16_vg2x1(uint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_bf16_vg2x2))) +void svmla_za32_bf16_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f16_vg2x2))) +void svmla_za32_f16_vg2x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s16_vg2x2))) +void svmla_za32_s16_vg2x2(uint32_t, svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u16_vg2x2))) +void svmla_za32_u16_vg2x2(uint32_t, svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_bf16_vg2x4))) +void svmla_za32_bf16_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f16_vg2x4))) +void svmla_za32_f16_vg2x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s16_vg2x4))) +void svmla_za32_s16_vg2x4(uint32_t, svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u16_vg2x4))) +void svmla_za32_u16_vg2x4(uint32_t, svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s8_vg4x1))) +void svmla_za32_s8_vg4x1(uint32_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u8_vg4x1))) +void svmla_za32_u8_vg4x1(uint32_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s8_vg4x2))) +void svmla_za32_s8_vg4x2(uint32_t, svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u8_vg4x2))) +void svmla_za32_u8_vg4x2(uint32_t, svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s8_vg4x4))) +void svmla_za32_s8_vg4x4(uint32_t, svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u8_vg4x4))) +void svmla_za32_u8_vg4x4(uint32_t, svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_f32_vg1x2))) +void svmls_single_za32_f32_vg1x2(uint32_t, svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_f32_vg1x4))) +void svmls_single_za32_f32_vg1x4(uint32_t, svfloat32x4_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_bf16_vg2x2))) +void svmls_single_za32_bf16_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_f16_vg2x2))) +void svmls_single_za32_f16_vg2x2(uint32_t, svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_s16_vg2x2))) +void svmls_single_za32_s16_vg2x2(uint32_t, svint16x2_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_u16_vg2x2))) +void svmls_single_za32_u16_vg2x2(uint32_t, svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_bf16_vg2x4))) +void svmls_single_za32_bf16_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_f16_vg2x4))) +void svmls_single_za32_f16_vg2x4(uint32_t, svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_s16_vg2x4))) +void svmls_single_za32_s16_vg2x4(uint32_t, svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_u16_vg2x4))) +void svmls_single_za32_u16_vg2x4(uint32_t, svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_s8_vg4x2))) +void svmls_single_za32_s8_vg4x2(uint32_t, svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_u8_vg4x2))) +void svmls_single_za32_u8_vg4x2(uint32_t, svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_s8_vg4x4))) +void svmls_single_za32_s8_vg4x4(uint32_t, svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_u8_vg4x4))) +void svmls_single_za32_u8_vg4x4(uint32_t, svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f32_vg1x2))) +void svmls_lane_za32_f32_vg1x2(uint32_t, svfloat32x2_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f32_vg1x4))) +void svmls_lane_za32_f32_vg1x4(uint32_t, svfloat32x4_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_bf16_vg2x1))) +void svmls_lane_za32_bf16_vg2x1(uint32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f16_vg2x1))) +void svmls_lane_za32_f16_vg2x1(uint32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s16_vg2x1))) +void svmls_lane_za32_s16_vg2x1(uint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u16_vg2x1))) +void svmls_lane_za32_u16_vg2x1(uint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_bf16_vg2x2))) +void svmls_lane_za32_bf16_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f16_vg2x2))) +void svmls_lane_za32_f16_vg2x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s16_vg2x2))) +void svmls_lane_za32_s16_vg2x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u16_vg2x2))) +void svmls_lane_za32_u16_vg2x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_bf16_vg2x4))) +void svmls_lane_za32_bf16_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f16_vg2x4))) +void svmls_lane_za32_f16_vg2x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s16_vg2x4))) +void svmls_lane_za32_s16_vg2x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u16_vg2x4))) +void svmls_lane_za32_u16_vg2x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s8_vg4x1))) +void svmls_lane_za32_s8_vg4x1(uint32_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u8_vg4x1))) +void svmls_lane_za32_u8_vg4x1(uint32_t, svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s8_vg4x2))) +void svmls_lane_za32_s8_vg4x2(uint32_t, svint8x2_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u8_vg4x2))) +void svmls_lane_za32_u8_vg4x2(uint32_t, svuint8x2_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s8_vg4x4))) +void svmls_lane_za32_s8_vg4x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u8_vg4x4))) +void svmls_lane_za32_u8_vg4x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f32_vg1x2))) +void svmls_za32_f32_vg1x2(uint32_t, svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f32_vg1x4))) +void svmls_za32_f32_vg1x4(uint32_t, svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_bf16_vg2x1))) +void svmls_za32_bf16_vg2x1(uint32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f16_vg2x1))) +void svmls_za32_f16_vg2x1(uint32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s16_vg2x1))) +void svmls_za32_s16_vg2x1(uint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u16_vg2x1))) +void svmls_za32_u16_vg2x1(uint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_bf16_vg2x2))) +void svmls_za32_bf16_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f16_vg2x2))) +void svmls_za32_f16_vg2x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s16_vg2x2))) +void svmls_za32_s16_vg2x2(uint32_t, svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u16_vg2x2))) +void svmls_za32_u16_vg2x2(uint32_t, svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_bf16_vg2x4))) +void svmls_za32_bf16_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f16_vg2x4))) +void svmls_za32_f16_vg2x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s16_vg2x4))) +void svmls_za32_s16_vg2x4(uint32_t, svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u16_vg2x4))) +void svmls_za32_u16_vg2x4(uint32_t, svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s8_vg4x1))) +void svmls_za32_s8_vg4x1(uint32_t, 
svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u8_vg4x1))) +void svmls_za32_u8_vg4x1(uint32_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s8_vg4x2))) +void svmls_za32_s8_vg4x2(uint32_t, svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u8_vg4x2))) +void svmls_za32_u8_vg4x2(uint32_t, svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s8_vg4x4))) +void svmls_za32_s8_vg4x4(uint32_t, svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u8_vg4x4))) +void svmls_za32_u8_vg4x4(uint32_t, svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_s16_m))) +void svmopa_za32_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_u16_m))) +void svmopa_za32_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_s16_m))) +void svmops_za32_s16_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_u16_m))) +void svmops_za32_u16_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_vg2))) +svuint16x2_t svread_hor_za16_u16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_bf16_vg2))) +svbfloat16x2_t svread_hor_za16_bf16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_f16_vg2))) +svfloat16x2_t svread_hor_za16_f16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_s16_vg2))) +svint16x2_t svread_hor_za16_s16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_u16_vg4))) +svuint16x4_t svread_hor_za16_u16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_bf16_vg4))) +svbfloat16x4_t svread_hor_za16_bf16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_f16_vg4))) +svfloat16x4_t svread_hor_za16_f16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za16_s16_vg4))) +svint16x4_t svread_hor_za16_s16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_u32_vg2))) +svuint32x2_t svread_hor_za32_u32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_f32_vg2))) +svfloat32x2_t svread_hor_za32_f32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_s32_vg2))) +svint32x2_t svread_hor_za32_s32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_u32_vg4))) +svuint32x4_t svread_hor_za32_u32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_f32_vg4))) +svfloat32x4_t svread_hor_za32_f32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za32_s32_vg4))) +svint32x4_t svread_hor_za32_s32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_u64_vg2))) 
+svuint64x2_t svread_hor_za64_u64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_f64_vg2))) +svfloat64x2_t svread_hor_za64_f64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_s64_vg2))) +svint64x2_t svread_hor_za64_s64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_u64_vg4))) +svuint64x4_t svread_hor_za64_u64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_f64_vg4))) +svfloat64x4_t svread_hor_za64_f64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za64_s64_vg4))) +svint64x4_t svread_hor_za64_s64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_vg2))) +svuint8x2_t svread_hor_za8_u8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_vg2))) +svint8x2_t svread_hor_za8_s8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_u8_vg4))) +svuint8x4_t svread_hor_za8_u8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_hor_za8_s8_vg4))) +svint8x4_t svread_hor_za8_s8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_vg2))) +svuint16x2_t svread_ver_za16_u16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_vg2))) +svbfloat16x2_t svread_ver_za16_bf16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_f16_vg2))) +svfloat16x2_t svread_ver_za16_f16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_s16_vg2))) +svint16x2_t svread_ver_za16_s16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_u16_vg4))) +svuint16x4_t svread_ver_za16_u16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_bf16_vg4))) +svbfloat16x4_t svread_ver_za16_bf16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_f16_vg4))) +svfloat16x4_t svread_ver_za16_f16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za16_s16_vg4))) +svint16x4_t svread_ver_za16_s16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_u32_vg2))) +svuint32x2_t svread_ver_za32_u32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_f32_vg2))) +svfloat32x2_t svread_ver_za32_f32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_s32_vg2))) +svint32x2_t svread_ver_za32_s32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_u32_vg4))) +svuint32x4_t svread_ver_za32_u32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_f32_vg4))) +svfloat32x4_t svread_ver_za32_f32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za32_s32_vg4))) +svint32x4_t svread_ver_za32_s32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_u64_vg2))) +svuint64x2_t 
svread_ver_za64_u64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_f64_vg2))) +svfloat64x2_t svread_ver_za64_f64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_s64_vg2))) +svint64x2_t svread_ver_za64_s64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_u64_vg4))) +svuint64x4_t svread_ver_za64_u64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_f64_vg4))) +svfloat64x4_t svread_ver_za64_f64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za64_s64_vg4))) +svint64x4_t svread_ver_za64_s64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_vg2))) +svuint8x2_t svread_ver_za8_u8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_vg2))) +svint8x2_t svread_ver_za8_s8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_u8_vg4))) +svuint8x4_t svread_ver_za8_u8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_ver_za8_s8_vg4))) +svint8x4_t svread_ver_za8_s8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_u16_vg1x2))) +svuint16x2_t svread_za16_u16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_bf16_vg1x2))) +svbfloat16x2_t svread_za16_bf16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_f16_vg1x2))) +svfloat16x2_t svread_za16_f16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_s16_vg1x2))) +svint16x2_t svread_za16_s16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_u16_vg1x4))) +svuint16x4_t svread_za16_u16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_bf16_vg1x4))) +svbfloat16x4_t svread_za16_bf16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_f16_vg1x4))) +svfloat16x4_t svread_za16_f16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za16_s16_vg1x4))) +svint16x4_t svread_za16_s16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za32_u32_vg1x2))) +svuint32x2_t svread_za32_u32_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za32_f32_vg1x2))) +svfloat32x2_t svread_za32_f32_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za32_s32_vg1x2))) +svint32x2_t svread_za32_s32_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za32_u32_vg1x4))) +svuint32x4_t svread_za32_u32_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za32_f32_vg1x4))) +svfloat32x4_t svread_za32_f32_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za32_s32_vg1x4))) +svint32x4_t svread_za32_s32_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za64_u64_vg1x2))) +svuint64x2_t svread_za64_u64_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za64_f64_vg1x2))) +svfloat64x2_t svread_za64_f64_vg1x2(uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za64_s64_vg1x2))) +svint64x2_t svread_za64_s64_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za64_u64_vg1x4))) +svuint64x4_t svread_za64_u64_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za64_f64_vg1x4))) +svfloat64x4_t svread_za64_f64_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za64_s64_vg1x4))) +svint64x4_t svread_za64_s64_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_u8_vg1x2))) +svuint8x2_t svread_za8_u8_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_s8_vg1x2))) +svint8x2_t svread_za8_s8_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_u8_vg1x4))) +svuint8x4_t svread_za8_u8_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svread_za8_s8_vg1x4))) +svint8x4_t svread_za8_s8_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svstr_zt))) +void svstr_zt(uint64_t, void *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_u32_vg1x2))) +void svsub_write_single_za32_u32_vg1x2(uint32_t, svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_s32_vg1x2))) +void svsub_write_single_za32_s32_vg1x2(uint32_t, svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_u32_vg1x4))) +void svsub_write_single_za32_u32_vg1x4(uint32_t, svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_s32_vg1x4))) +void svsub_write_single_za32_s32_vg1x4(uint32_t, svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za32_u32_vg1x2))) +void svsub_write_za32_u32_vg1x2(uint32_t, svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za32_s32_vg1x2))) +void svsub_write_za32_s32_vg1x2(uint32_t, svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za32_u32_vg1x4))) +void svsub_write_za32_u32_vg1x4(uint32_t, svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za32_s32_vg1x4))) +void svsub_write_za32_s32_vg1x4(uint32_t, svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_u32_vg1x2))) +void svsub_za32_u32_vg1x2(uint32_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_f32_vg1x2))) +void svsub_za32_f32_vg1x2(uint32_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_s32_vg1x2))) +void svsub_za32_s32_vg1x2(uint32_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_u32_vg1x4))) +void svsub_za32_u32_vg1x4(uint32_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_f32_vg1x4))) +void svsub_za32_f32_vg1x4(uint32_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_s32_vg1x4))) +void svsub_za32_s32_vg1x4(uint32_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_single_za32_s8_vg1x2))) +void svsudot_single_za32_s8_vg1x2(uint32_t, svint8x2_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_single_za32_s8_vg1x4))) +void svsudot_single_za32_s8_vg1x4(uint32_t, svint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_lane_za32_s8_vg1x2))) +void svsudot_lane_za32_s8_vg1x2(uint32_t, svint8x2_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_lane_za32_s8_vg1x4))) +void svsudot_lane_za32_s8_vg1x4(uint32_t, svint8x4_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_za32_s8_vg1x2))) +void svsudot_za32_s8_vg1x2(uint32_t, svint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_za32_s8_vg1x4))) +void svsudot_za32_s8_vg1x4(uint32_t, svint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_single_za32_s8_vg4x2))) +void svsumla_single_za32_s8_vg4x2(uint32_t, svint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_single_za32_s8_vg4x4))) +void svsumla_single_za32_s8_vg4x4(uint32_t, svint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_lane_za32_s8_vg4x1))) +void svsumla_lane_za32_s8_vg4x1(uint32_t, svint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_lane_za32_s8_vg4x2))) +void svsumla_lane_za32_s8_vg4x2(uint32_t, svint8x2_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_lane_za32_s8_vg4x4))) +void svsumla_lane_za32_s8_vg4x4(uint32_t, svint8x4_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_za32_s8_vg4x1))) +void svsumla_za32_s8_vg4x1(uint32_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_za32_s8_vg4x2))) +void svsumla_za32_s8_vg4x2(uint32_t, svint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_za32_s8_vg4x4))) +void svsumla_za32_s8_vg4x4(uint32_t, svint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsuvdot_lane_za32_s8_vg1x4))) +void svsuvdot_lane_za32_s8_vg1x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_single_za32_u8_vg1x2))) +void svusdot_single_za32_u8_vg1x2(uint32_t, svuint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_single_za32_u8_vg1x4))) +void svusdot_single_za32_u8_vg1x4(uint32_t, svuint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_lane_za32_u8_vg1x2))) +void svusdot_lane_za32_u8_vg1x2(uint32_t, svuint8x2_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_lane_za32_u8_vg1x4))) +void svusdot_lane_za32_u8_vg1x4(uint32_t, svuint8x4_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_za32_u8_vg1x2))) +void svusdot_za32_u8_vg1x2(uint32_t, svuint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_za32_u8_vg1x4))) +void svusdot_za32_u8_vg1x4(uint32_t, svuint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_single_za32_u8_vg4x2))) +void svusmla_single_za32_u8_vg4x2(uint32_t, svuint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_single_za32_u8_vg4x4))) +void svusmla_single_za32_u8_vg4x4(uint32_t, svuint8x4_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_lane_za32_u8_vg4x1))) +void svusmla_lane_za32_u8_vg4x1(uint32_t, svuint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_lane_za32_u8_vg4x2))) +void svusmla_lane_za32_u8_vg4x2(uint32_t, svuint8x2_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_lane_za32_u8_vg4x4))) +void svusmla_lane_za32_u8_vg4x4(uint32_t, svuint8x4_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_za32_u8_vg4x1))) +void svusmla_za32_u8_vg4x1(uint32_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_za32_u8_vg4x2))) +void svusmla_za32_u8_vg4x2(uint32_t, svuint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_za32_u8_vg4x4))) +void svusmla_za32_u8_vg4x4(uint32_t, svuint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusvdot_lane_za32_u8_vg1x4))) +void svusvdot_lane_za32_u8_vg1x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_bf16_vg1x2))) +void svvdot_lane_za32_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_f16_vg1x2))) +void svvdot_lane_za32_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_s16_vg1x2))) +void svvdot_lane_za32_s16_vg1x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_u16_vg1x2))) +void svvdot_lane_za32_u16_vg1x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_s8_vg1x4))) +void svvdot_lane_za32_s8_vg1x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_u8_vg1x4))) +void svvdot_lane_za32_u8_vg1x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_vg2))) +void svwrite_hor_za16_u16_vg2(uint64_t, uint32_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_vg2))) +void svwrite_hor_za16_bf16_vg2(uint64_t, uint32_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_vg2))) +void svwrite_hor_za16_f16_vg2(uint64_t, uint32_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_vg2))) +void svwrite_hor_za16_s16_vg2(uint64_t, uint32_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_vg4))) +void svwrite_hor_za16_u16_vg4(uint64_t, uint32_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_vg4))) +void svwrite_hor_za16_bf16_vg4(uint64_t, uint32_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_vg4))) +void svwrite_hor_za16_f16_vg4(uint64_t, uint32_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_vg4))) +void svwrite_hor_za16_s16_vg4(uint64_t, uint32_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_vg2))) +void svwrite_hor_za32_u32_vg2(uint64_t, uint32_t, svuint32x2_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_vg2))) +void svwrite_hor_za32_f32_vg2(uint64_t, uint32_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_vg2))) +void svwrite_hor_za32_s32_vg2(uint64_t, uint32_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_vg4))) +void svwrite_hor_za32_u32_vg4(uint64_t, uint32_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_vg4))) +void svwrite_hor_za32_f32_vg4(uint64_t, uint32_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_vg4))) +void svwrite_hor_za32_s32_vg4(uint64_t, uint32_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_vg2))) +void svwrite_hor_za64_u64_vg2(uint64_t, uint32_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_vg2))) +void svwrite_hor_za64_f64_vg2(uint64_t, uint32_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_vg2))) +void svwrite_hor_za64_s64_vg2(uint64_t, uint32_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_vg4))) +void svwrite_hor_za64_u64_vg4(uint64_t, uint32_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_vg4))) +void svwrite_hor_za64_f64_vg4(uint64_t, uint32_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_vg4))) +void svwrite_hor_za64_s64_vg4(uint64_t, uint32_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_vg2))) +void svwrite_hor_za8_u8_vg2(uint64_t, uint32_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg2))) +void svwrite_hor_za8_s8_vg2(uint64_t, uint32_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_vg4))) +void svwrite_hor_za8_u8_vg4(uint64_t, uint32_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg4))) +void svwrite_hor_za8_s8_vg4(uint64_t, uint32_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_vg2))) +void svwrite_ver_za16_u16_vg2(uint64_t, uint32_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_vg2))) +void svwrite_ver_za16_bf16_vg2(uint64_t, uint32_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_vg2))) +void svwrite_ver_za16_f16_vg2(uint64_t, uint32_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_vg2))) +void svwrite_ver_za16_s16_vg2(uint64_t, uint32_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_vg4))) +void svwrite_ver_za16_u16_vg4(uint64_t, uint32_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_vg4))) +void svwrite_ver_za16_bf16_vg4(uint64_t, uint32_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_vg4))) +void svwrite_ver_za16_f16_vg4(uint64_t, uint32_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_vg4))) +void svwrite_ver_za16_s16_vg4(uint64_t, uint32_t, svint16x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_vg2))) +void svwrite_ver_za32_u32_vg2(uint64_t, uint32_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_vg2))) +void svwrite_ver_za32_f32_vg2(uint64_t, uint32_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_vg2))) +void svwrite_ver_za32_s32_vg2(uint64_t, uint32_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_vg4))) +void svwrite_ver_za32_u32_vg4(uint64_t, uint32_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_vg4))) +void svwrite_ver_za32_f32_vg4(uint64_t, uint32_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_vg4))) +void svwrite_ver_za32_s32_vg4(uint64_t, uint32_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_vg2))) +void svwrite_ver_za64_u64_vg2(uint64_t, uint32_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_vg2))) +void svwrite_ver_za64_f64_vg2(uint64_t, uint32_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_vg2))) +void svwrite_ver_za64_s64_vg2(uint64_t, uint32_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_vg4))) +void svwrite_ver_za64_u64_vg4(uint64_t, uint32_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_vg4))) +void svwrite_ver_za64_f64_vg4(uint64_t, uint32_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_vg4))) +void svwrite_ver_za64_s64_vg4(uint64_t, uint32_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_vg2))) +void svwrite_ver_za8_u8_vg2(uint64_t, uint32_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg2))) +void svwrite_ver_za8_s8_vg2(uint64_t, uint32_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_vg4))) +void svwrite_ver_za8_u8_vg4(uint64_t, uint32_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg4))) +void svwrite_ver_za8_s8_vg4(uint64_t, uint32_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_u16_vg1x2))) +void svwrite_za16_u16_vg1x2(uint32_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_bf16_vg1x2))) +void svwrite_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_f16_vg1x2))) +void svwrite_za16_f16_vg1x2(uint32_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_s16_vg1x2))) +void svwrite_za16_s16_vg1x2(uint32_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_u16_vg1x4))) +void svwrite_za16_u16_vg1x4(uint32_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_bf16_vg1x4))) +void svwrite_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_f16_vg1x4))) +void svwrite_za16_f16_vg1x4(uint32_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_s16_vg1x4))) +void 
svwrite_za16_s16_vg1x4(uint32_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_u32_vg1x2))) +void svwrite_za32_u32_vg1x2(uint32_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_f32_vg1x2))) +void svwrite_za32_f32_vg1x2(uint32_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_s32_vg1x2))) +void svwrite_za32_s32_vg1x2(uint32_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_u32_vg1x4))) +void svwrite_za32_u32_vg1x4(uint32_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_f32_vg1x4))) +void svwrite_za32_f32_vg1x4(uint32_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_s32_vg1x4))) +void svwrite_za32_s32_vg1x4(uint32_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_u64_vg1x2))) +void svwrite_za64_u64_vg1x2(uint32_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_f64_vg1x2))) +void svwrite_za64_f64_vg1x2(uint32_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_s64_vg1x2))) +void svwrite_za64_s64_vg1x2(uint32_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_u64_vg1x4))) +void svwrite_za64_u64_vg1x4(uint32_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_f64_vg1x4))) +void svwrite_za64_f64_vg1x4(uint32_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_s64_vg1x4))) +void svwrite_za64_s64_vg1x4(uint32_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x2))) +void svwrite_za8_u8_vg1x2(uint32_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x2))) +void svwrite_za8_s8_vg1x2(uint32_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x4))) +void svwrite_za8_u8_vg1x4(uint32_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x4))) +void svwrite_za8_s8_vg1x4(uint32_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_zt))) +void svzero_zt(uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_u32_vg1x2))) +void svadd_write_za32_vg1x2(uint32_t, svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_s32_vg1x2))) +void svadd_write_za32_vg1x2(uint32_t, svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_u32_vg1x4))) +void svadd_write_za32_vg1x4(uint32_t, svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za32_s32_vg1x4))) +void svadd_write_za32_vg1x4(uint32_t, svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za32_u32_vg1x2))) +void svadd_write_za32_vg1x2(uint32_t, svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za32_s32_vg1x2))) +void svadd_write_za32_vg1x2(uint32_t, svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za32_u32_vg1x4))) +void svadd_write_za32_vg1x4(uint32_t, svuint32x4_t, svuint32x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za32_s32_vg1x4))) +void svadd_write_za32_vg1x4(uint32_t, svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_u32_vg1x2))) +void svadd_za32_vg1x2(uint32_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_f32_vg1x2))) +void svadd_za32_vg1x2(uint32_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_s32_vg1x2))) +void svadd_za32_vg1x2(uint32_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_u32_vg1x4))) +void svadd_za32_vg1x4(uint32_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_f32_vg1x4))) +void svadd_za32_vg1x4(uint32_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za32_s32_vg1x4))) +void svadd_za32_vg1x4(uint32_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svbmopa_za32_u32_m))) +void svbmopa_za32_m(uint64_t, svbool_t, svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svbmopa_za32_s32_m))) +void svbmopa_za32_m(uint64_t, svbool_t, svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svbmops_za32_u32_m))) +void svbmops_za32_m(uint64_t, svbool_t, svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svbmops_za32_s32_m))) +void svbmops_za32_m(uint64_t, svbool_t, svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_bf16_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_f16_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_s8_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_s16_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_u8_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_u16_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_bf16_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_f16_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_s8_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_s16_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_u8_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za32_u16_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_bf16_vg1x2))) +void svdot_lane_za32_vg1x2(uint32_t, 
svbfloat16x2_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_f16_vg1x2))) +void svdot_lane_za32_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_s8_vg1x2))) +void svdot_lane_za32_vg1x2(uint32_t, svint8x2_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_s16_vg1x2))) +void svdot_lane_za32_vg1x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_u8_vg1x2))) +void svdot_lane_za32_vg1x2(uint32_t, svuint8x2_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_u16_vg1x2))) +void svdot_lane_za32_vg1x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_bf16_vg1x4))) +void svdot_lane_za32_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_f16_vg1x4))) +void svdot_lane_za32_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_s8_vg1x4))) +void svdot_lane_za32_vg1x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_s16_vg1x4))) +void svdot_lane_za32_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_u8_vg1x4))) +void svdot_lane_za32_vg1x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za32_u16_vg1x4))) +void svdot_lane_za32_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_bf16_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_f16_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_s8_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_s16_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_u8_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_u16_vg1x2))) +void svdot_za32_vg1x2(uint32_t, svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_bf16_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_f16_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_s8_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_s16_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_u8_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svuint8x4_t, svuint8x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za32_u16_vg1x4))) +void svdot_za32_vg1x4(uint32_t, svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_f32_vg1x2))) +void svmla_za32_vg1x2(uint32_t, svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_f32_vg1x4))) +void svmla_za32_vg1x4(uint32_t, svfloat32x4_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_bf16_vg2x2))) +void svmla_za32_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_f16_vg2x2))) +void svmla_za32_vg2x2(uint32_t, svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_s16_vg2x2))) +void svmla_za32_vg2x2(uint32_t, svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_u16_vg2x2))) +void svmla_za32_vg2x2(uint32_t, svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_bf16_vg2x4))) +void svmla_za32_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_f16_vg2x4))) +void svmla_za32_vg2x4(uint32_t, svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_s16_vg2x4))) +void svmla_za32_vg2x4(uint32_t, svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_u16_vg2x4))) +void svmla_za32_vg2x4(uint32_t, svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_s8_vg4x2))) +void svmla_za32_vg4x2(uint32_t, svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_u8_vg4x2))) +void svmla_za32_vg4x2(uint32_t, svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_s8_vg4x4))) +void svmla_za32_vg4x4(uint32_t, svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za32_u8_vg4x4))) +void svmla_za32_vg4x4(uint32_t, svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f32_vg1x2))) +void svmla_lane_za32_vg1x2(uint32_t, svfloat32x2_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f32_vg1x4))) +void svmla_lane_za32_vg1x4(uint32_t, svfloat32x4_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_bf16_vg2x1))) +void svmla_lane_za32_vg2x1(uint32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f16_vg2x1))) +void svmla_lane_za32_vg2x1(uint32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s16_vg2x1))) +void svmla_lane_za32_vg2x1(uint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u16_vg2x1))) +void svmla_lane_za32_vg2x1(uint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_bf16_vg2x2))) +void svmla_lane_za32_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f16_vg2x2))) +void 
svmla_lane_za32_vg2x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s16_vg2x2))) +void svmla_lane_za32_vg2x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u16_vg2x2))) +void svmla_lane_za32_vg2x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_bf16_vg2x4))) +void svmla_lane_za32_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_f16_vg2x4))) +void svmla_lane_za32_vg2x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s16_vg2x4))) +void svmla_lane_za32_vg2x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u16_vg2x4))) +void svmla_lane_za32_vg2x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s8_vg4x1))) +void svmla_lane_za32_vg4x1(uint32_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u8_vg4x1))) +void svmla_lane_za32_vg4x1(uint32_t, svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s8_vg4x2))) +void svmla_lane_za32_vg4x2(uint32_t, svint8x2_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u8_vg4x2))) +void svmla_lane_za32_vg4x2(uint32_t, svuint8x2_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_s8_vg4x4))) +void svmla_lane_za32_vg4x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za32_u8_vg4x4))) +void svmla_lane_za32_vg4x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f32_vg1x2))) +void svmla_za32_vg1x2(uint32_t, svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f32_vg1x4))) +void svmla_za32_vg1x4(uint32_t, svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_bf16_vg2x1))) +void svmla_za32_vg2x1(uint32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f16_vg2x1))) +void svmla_za32_vg2x1(uint32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s16_vg2x1))) +void svmla_za32_vg2x1(uint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u16_vg2x1))) +void svmla_za32_vg2x1(uint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_bf16_vg2x2))) +void svmla_za32_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f16_vg2x2))) +void svmla_za32_vg2x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s16_vg2x2))) +void svmla_za32_vg2x2(uint32_t, svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u16_vg2x2))) +void svmla_za32_vg2x2(uint32_t, svuint16x2_t, svuint16x2_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_bf16_vg2x4))) +void svmla_za32_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_f16_vg2x4))) +void svmla_za32_vg2x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s16_vg2x4))) +void svmla_za32_vg2x4(uint32_t, svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u16_vg2x4))) +void svmla_za32_vg2x4(uint32_t, svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s8_vg4x1))) +void svmla_za32_vg4x1(uint32_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u8_vg4x1))) +void svmla_za32_vg4x1(uint32_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s8_vg4x2))) +void svmla_za32_vg4x2(uint32_t, svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u8_vg4x2))) +void svmla_za32_vg4x2(uint32_t, svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_s8_vg4x4))) +void svmla_za32_vg4x4(uint32_t, svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za32_u8_vg4x4))) +void svmla_za32_vg4x4(uint32_t, svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_f32_vg1x2))) +void svmls_za32_vg1x2(uint32_t, svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_f32_vg1x4))) +void svmls_za32_vg1x4(uint32_t, svfloat32x4_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_bf16_vg2x2))) +void svmls_za32_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_f16_vg2x2))) +void svmls_za32_vg2x2(uint32_t, svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_s16_vg2x2))) +void svmls_za32_vg2x2(uint32_t, svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_u16_vg2x2))) +void svmls_za32_vg2x2(uint32_t, svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_bf16_vg2x4))) +void svmls_za32_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_f16_vg2x4))) +void svmls_za32_vg2x4(uint32_t, svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_s16_vg2x4))) +void svmls_za32_vg2x4(uint32_t, svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_u16_vg2x4))) +void svmls_za32_vg2x4(uint32_t, svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_s8_vg4x2))) +void svmls_za32_vg4x2(uint32_t, svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_u8_vg4x2))) +void svmls_za32_vg4x2(uint32_t, svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_s8_vg4x4))) +void svmls_za32_vg4x4(uint32_t, svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za32_u8_vg4x4))) +void 
svmls_za32_vg4x4(uint32_t, svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f32_vg1x2))) +void svmls_lane_za32_vg1x2(uint32_t, svfloat32x2_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f32_vg1x4))) +void svmls_lane_za32_vg1x4(uint32_t, svfloat32x4_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_bf16_vg2x1))) +void svmls_lane_za32_vg2x1(uint32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f16_vg2x1))) +void svmls_lane_za32_vg2x1(uint32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s16_vg2x1))) +void svmls_lane_za32_vg2x1(uint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u16_vg2x1))) +void svmls_lane_za32_vg2x1(uint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_bf16_vg2x2))) +void svmls_lane_za32_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f16_vg2x2))) +void svmls_lane_za32_vg2x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s16_vg2x2))) +void svmls_lane_za32_vg2x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u16_vg2x2))) +void svmls_lane_za32_vg2x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_bf16_vg2x4))) +void svmls_lane_za32_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_f16_vg2x4))) +void svmls_lane_za32_vg2x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s16_vg2x4))) +void svmls_lane_za32_vg2x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u16_vg2x4))) +void svmls_lane_za32_vg2x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s8_vg4x1))) +void svmls_lane_za32_vg4x1(uint32_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u8_vg4x1))) +void svmls_lane_za32_vg4x1(uint32_t, svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s8_vg4x2))) +void svmls_lane_za32_vg4x2(uint32_t, svint8x2_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u8_vg4x2))) +void svmls_lane_za32_vg4x2(uint32_t, svuint8x2_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_s8_vg4x4))) +void svmls_lane_za32_vg4x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za32_u8_vg4x4))) +void svmls_lane_za32_vg4x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f32_vg1x2))) +void svmls_za32_vg1x2(uint32_t, svfloat32x2_t, svfloat32x2_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f32_vg1x4))) +void svmls_za32_vg1x4(uint32_t, svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_bf16_vg2x1))) +void svmls_za32_vg2x1(uint32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f16_vg2x1))) +void svmls_za32_vg2x1(uint32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s16_vg2x1))) +void svmls_za32_vg2x1(uint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u16_vg2x1))) +void svmls_za32_vg2x1(uint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_bf16_vg2x2))) +void svmls_za32_vg2x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f16_vg2x2))) +void svmls_za32_vg2x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s16_vg2x2))) +void svmls_za32_vg2x2(uint32_t, svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u16_vg2x2))) +void svmls_za32_vg2x2(uint32_t, svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_bf16_vg2x4))) +void svmls_za32_vg2x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_f16_vg2x4))) +void svmls_za32_vg2x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s16_vg2x4))) +void svmls_za32_vg2x4(uint32_t, svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u16_vg2x4))) +void svmls_za32_vg2x4(uint32_t, svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s8_vg4x1))) +void svmls_za32_vg4x1(uint32_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u8_vg4x1))) +void svmls_za32_vg4x1(uint32_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s8_vg4x2))) +void svmls_za32_vg4x2(uint32_t, svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u8_vg4x2))) +void svmls_za32_vg4x2(uint32_t, svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_s8_vg4x4))) +void svmls_za32_vg4x4(uint32_t, svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za32_u8_vg4x4))) +void svmls_za32_vg4x4(uint32_t, svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_s16_m))) +void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za32_u16_m))) +void svmopa_za32_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_s16_m))) +void svmops_za32_m(uint64_t, svbool_t, svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za32_u16_m))) +void svmops_za32_m(uint64_t, svbool_t, svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_u32_vg1x2))) +void svsub_write_za32_vg1x2(uint32_t, svuint32x2_t, 
svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_s32_vg1x2))) +void svsub_write_za32_vg1x2(uint32_t, svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_u32_vg1x4))) +void svsub_write_za32_vg1x4(uint32_t, svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za32_s32_vg1x4))) +void svsub_write_za32_vg1x4(uint32_t, svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za32_u32_vg1x2))) +void svsub_write_za32_vg1x2(uint32_t, svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za32_s32_vg1x2))) +void svsub_write_za32_vg1x2(uint32_t, svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za32_u32_vg1x4))) +void svsub_write_za32_vg1x4(uint32_t, svuint32x4_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za32_s32_vg1x4))) +void svsub_write_za32_vg1x4(uint32_t, svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_u32_vg1x2))) +void svsub_za32_vg1x2(uint32_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_f32_vg1x2))) +void svsub_za32_vg1x2(uint32_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_s32_vg1x2))) +void svsub_za32_vg1x2(uint32_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_u32_vg1x4))) +void svsub_za32_vg1x4(uint32_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_f32_vg1x4))) +void svsub_za32_vg1x4(uint32_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za32_s32_vg1x4))) +void svsub_za32_vg1x4(uint32_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_single_za32_s8_vg1x2))) +void svsudot_za32_vg1x2(uint32_t, svint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_single_za32_s8_vg1x4))) +void svsudot_za32_vg1x4(uint32_t, svint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_lane_za32_s8_vg1x2))) +void svsudot_lane_za32_vg1x2(uint32_t, svint8x2_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_lane_za32_s8_vg1x4))) +void svsudot_lane_za32_vg1x4(uint32_t, svint8x4_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_za32_s8_vg1x2))) +void svsudot_za32_vg1x2(uint32_t, svint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsudot_za32_s8_vg1x4))) +void svsudot_za32_vg1x4(uint32_t, svint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_single_za32_s8_vg4x2))) +void svsumla_za32_vg4x2(uint32_t, svint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_single_za32_s8_vg4x4))) +void svsumla_za32_vg4x4(uint32_t, svint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_lane_za32_s8_vg4x1))) +void svsumla_lane_za32_vg4x1(uint32_t, svint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_lane_za32_s8_vg4x2))) +void svsumla_lane_za32_vg4x2(uint32_t, svint8x2_t, svuint8_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_lane_za32_s8_vg4x4))) +void svsumla_lane_za32_vg4x4(uint32_t, svint8x4_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_za32_s8_vg4x1))) +void svsumla_za32_vg4x1(uint32_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_za32_s8_vg4x2))) +void svsumla_za32_vg4x2(uint32_t, svint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsumla_za32_s8_vg4x4))) +void svsumla_za32_vg4x4(uint32_t, svint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsuvdot_lane_za32_s8_vg1x4))) +void svsuvdot_lane_za32_vg1x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_single_za32_u8_vg1x2))) +void svusdot_za32_vg1x2(uint32_t, svuint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_single_za32_u8_vg1x4))) +void svusdot_za32_vg1x4(uint32_t, svuint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_lane_za32_u8_vg1x2))) +void svusdot_lane_za32_vg1x2(uint32_t, svuint8x2_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_lane_za32_u8_vg1x4))) +void svusdot_lane_za32_vg1x4(uint32_t, svuint8x4_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_za32_u8_vg1x2))) +void svusdot_za32_vg1x2(uint32_t, svuint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusdot_za32_u8_vg1x4))) +void svusdot_za32_vg1x4(uint32_t, svuint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_single_za32_u8_vg4x2))) +void svusmla_za32_vg4x2(uint32_t, svuint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_single_za32_u8_vg4x4))) +void svusmla_za32_vg4x4(uint32_t, svuint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_lane_za32_u8_vg4x1))) +void svusmla_lane_za32_vg4x1(uint32_t, svuint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_lane_za32_u8_vg4x2))) +void svusmla_lane_za32_vg4x2(uint32_t, svuint8x2_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_lane_za32_u8_vg4x4))) +void svusmla_lane_za32_vg4x4(uint32_t, svuint8x4_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_za32_u8_vg4x1))) +void svusmla_za32_vg4x1(uint32_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_za32_u8_vg4x2))) +void svusmla_za32_vg4x2(uint32_t, svuint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusmla_za32_u8_vg4x4))) +void svusmla_za32_vg4x4(uint32_t, svuint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svusvdot_lane_za32_u8_vg1x4))) +void svusvdot_lane_za32_vg1x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_bf16_vg1x2))) +void svvdot_lane_za32_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_f16_vg1x2))) +void svvdot_lane_za32_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_s16_vg1x2))) +void 
svvdot_lane_za32_vg1x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_u16_vg1x2))) +void svvdot_lane_za32_vg1x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_s8_vg1x4))) +void svvdot_lane_za32_vg1x4(uint32_t, svint8x4_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za32_u8_vg1x4))) +void svvdot_lane_za32_vg1x4(uint32_t, svuint8x4_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_vg2))) +void svwrite_hor_za16_vg2(uint64_t, uint32_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_vg2))) +void svwrite_hor_za16_vg2(uint64_t, uint32_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_vg2))) +void svwrite_hor_za16_vg2(uint64_t, uint32_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_vg2))) +void svwrite_hor_za16_vg2(uint64_t, uint32_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_u16_vg4))) +void svwrite_hor_za16_vg4(uint64_t, uint32_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_bf16_vg4))) +void svwrite_hor_za16_vg4(uint64_t, uint32_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_f16_vg4))) +void svwrite_hor_za16_vg4(uint64_t, uint32_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za16_s16_vg4))) +void svwrite_hor_za16_vg4(uint64_t, uint32_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_vg2))) +void svwrite_hor_za32_vg2(uint64_t, uint32_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_vg2))) +void svwrite_hor_za32_vg2(uint64_t, uint32_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_vg2))) +void svwrite_hor_za32_vg2(uint64_t, uint32_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_u32_vg4))) +void svwrite_hor_za32_vg4(uint64_t, uint32_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_f32_vg4))) +void svwrite_hor_za32_vg4(uint64_t, uint32_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za32_s32_vg4))) +void svwrite_hor_za32_vg4(uint64_t, uint32_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_vg2))) +void svwrite_hor_za64_vg2(uint64_t, uint32_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_vg2))) +void svwrite_hor_za64_vg2(uint64_t, uint32_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_vg2))) +void svwrite_hor_za64_vg2(uint64_t, uint32_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_u64_vg4))) +void svwrite_hor_za64_vg4(uint64_t, uint32_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_f64_vg4))) +void svwrite_hor_za64_vg4(uint64_t, uint32_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za64_s64_vg4))) +void 
svwrite_hor_za64_vg4(uint64_t, uint32_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_vg2))) +void svwrite_hor_za8_vg2(uint64_t, uint32_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg2))) +void svwrite_hor_za8_vg2(uint64_t, uint32_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_u8_vg4))) +void svwrite_hor_za8_vg4(uint64_t, uint32_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_hor_za8_s8_vg4))) +void svwrite_hor_za8_vg4(uint64_t, uint32_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_vg2))) +void svwrite_ver_za16_vg2(uint64_t, uint32_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_vg2))) +void svwrite_ver_za16_vg2(uint64_t, uint32_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_vg2))) +void svwrite_ver_za16_vg2(uint64_t, uint32_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_vg2))) +void svwrite_ver_za16_vg2(uint64_t, uint32_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_u16_vg4))) +void svwrite_ver_za16_vg4(uint64_t, uint32_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_bf16_vg4))) +void svwrite_ver_za16_vg4(uint64_t, uint32_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_f16_vg4))) +void svwrite_ver_za16_vg4(uint64_t, uint32_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za16_s16_vg4))) +void svwrite_ver_za16_vg4(uint64_t, uint32_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_vg2))) +void svwrite_ver_za32_vg2(uint64_t, uint32_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_vg2))) +void svwrite_ver_za32_vg2(uint64_t, uint32_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_vg2))) +void svwrite_ver_za32_vg2(uint64_t, uint32_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_u32_vg4))) +void svwrite_ver_za32_vg4(uint64_t, uint32_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_f32_vg4))) +void svwrite_ver_za32_vg4(uint64_t, uint32_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za32_s32_vg4))) +void svwrite_ver_za32_vg4(uint64_t, uint32_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_vg2))) +void svwrite_ver_za64_vg2(uint64_t, uint32_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_vg2))) +void svwrite_ver_za64_vg2(uint64_t, uint32_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_vg2))) +void svwrite_ver_za64_vg2(uint64_t, uint32_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_u64_vg4))) +void svwrite_ver_za64_vg4(uint64_t, uint32_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_f64_vg4))) +void svwrite_ver_za64_vg4(uint64_t, uint32_t, svfloat64x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za64_s64_vg4))) +void svwrite_ver_za64_vg4(uint64_t, uint32_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_vg2))) +void svwrite_ver_za8_vg2(uint64_t, uint32_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg2))) +void svwrite_ver_za8_vg2(uint64_t, uint32_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_vg4))) +void svwrite_ver_za8_vg4(uint64_t, uint32_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_vg4))) +void svwrite_ver_za8_vg4(uint64_t, uint32_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_u16_vg1x2))) +void svwrite_za16_vg1x2(uint32_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_bf16_vg1x2))) +void svwrite_za16_vg1x2(uint32_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_f16_vg1x2))) +void svwrite_za16_vg1x2(uint32_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_s16_vg1x2))) +void svwrite_za16_vg1x2(uint32_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_u16_vg1x4))) +void svwrite_za16_vg1x4(uint32_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_bf16_vg1x4))) +void svwrite_za16_vg1x4(uint32_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_f16_vg1x4))) +void svwrite_za16_vg1x4(uint32_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za16_s16_vg1x4))) +void svwrite_za16_vg1x4(uint32_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_u32_vg1x2))) +void svwrite_za32_vg1x2(uint32_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_f32_vg1x2))) +void svwrite_za32_vg1x2(uint32_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_s32_vg1x2))) +void svwrite_za32_vg1x2(uint32_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_u32_vg1x4))) +void svwrite_za32_vg1x4(uint32_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_f32_vg1x4))) +void svwrite_za32_vg1x4(uint32_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za32_s32_vg1x4))) +void svwrite_za32_vg1x4(uint32_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_u64_vg1x2))) +void svwrite_za64_vg1x2(uint32_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_f64_vg1x2))) +void svwrite_za64_vg1x2(uint32_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_s64_vg1x2))) +void svwrite_za64_vg1x2(uint32_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_u64_vg1x4))) +void svwrite_za64_vg1x4(uint32_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_f64_vg1x4))) +void svwrite_za64_vg1x4(uint32_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za64_s64_vg1x4))) +void svwrite_za64_vg1x4(uint32_t, svint64x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x2))) +void svwrite_za8_vg1x2(uint32_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x2))) +void svwrite_za8_vg1x2(uint32_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x4))) +void svwrite_za8_vg1x4(uint32_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x4))) +void svwrite_za8_vg1x4(uint32_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x2))) +void svadd_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x4))) +void svadd_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x2))) +void svmla_single_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x4))) +void svmla_single_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x2))) +void svmla_lane_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x4))) +void svmla_lane_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x2))) +void svmla_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x4))) +void svmla_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x2))) +void svmls_single_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x4))) +void svmls_single_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x2))) +void svmls_lane_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x4))) +void svmls_lane_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x2))) +void svmls_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x4))) +void svmls_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_bf16_m))) +void svmopa_za16_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_bf16_m))) +void svmops_za16_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x2))) +void svsub_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x4))) +void svsub_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x2))) +void svadd_za16_vg1x2(uint32_t, 
svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x4))) +void svadd_za16_vg1x4(uint32_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x2))) +void svmla_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x4))) +void svmla_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x2))) +void svmla_lane_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x4))) +void svmla_lane_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x2))) +void svmla_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x4))) +void svmla_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x2))) +void svmls_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x4))) +void svmls_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x2))) +void svmls_lane_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x4))) +void svmls_lane_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x2))) +void svmls_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x4))) +void svmls_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_bf16_m))) +void svmopa_za16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_bf16_m))) +void svmops_za16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x2))) +void svsub_za16_vg1x2(uint32_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x4))) +void svsub_za16_vg1x4(uint32_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x2))) +void svadd_za64_f64_vg1x2(uint32_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x4))) +void svadd_za64_f64_vg1x4(uint32_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_f64_vg1x2))) +void svmla_single_za64_f64_vg1x2(uint32_t, svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_f64_vg1x4))) +void svmla_single_za64_f64_vg1x4(uint32_t, svfloat64x4_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_f64_vg1x2))) +void svmla_lane_za64_f64_vg1x2(uint32_t, svfloat64x2_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_f64_vg1x4))) +void 
svmla_lane_za64_f64_vg1x4(uint32_t, svfloat64x4_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_f64_vg1x2))) +void svmla_za64_f64_vg1x2(uint32_t, svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_f64_vg1x4))) +void svmla_za64_f64_vg1x4(uint32_t, svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_f64_vg1x2))) +void svmls_single_za64_f64_vg1x2(uint32_t, svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_f64_vg1x4))) +void svmls_single_za64_f64_vg1x4(uint32_t, svfloat64x4_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_f64_vg1x2))) +void svmls_lane_za64_f64_vg1x2(uint32_t, svfloat64x2_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_f64_vg1x4))) +void svmls_lane_za64_f64_vg1x4(uint32_t, svfloat64x4_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_f64_vg1x2))) +void svmls_za64_f64_vg1x2(uint32_t, svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_f64_vg1x4))) +void svmls_za64_f64_vg1x4(uint32_t, svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_f64_vg1x2))) +void svsub_za64_f64_vg1x2(uint32_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_f64_vg1x4))) +void svsub_za64_f64_vg1x4(uint32_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x2))) +void svadd_za64_vg1x2(uint32_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x4))) +void svadd_za64_vg1x4(uint32_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_f64_vg1x2))) +void svmla_za64_vg1x2(uint32_t, svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_f64_vg1x4))) +void svmla_za64_vg1x4(uint32_t, svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_f64_vg1x2))) +void svmla_lane_za64_vg1x2(uint32_t, svfloat64x2_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_f64_vg1x4))) +void svmla_lane_za64_vg1x4(uint32_t, svfloat64x4_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_f64_vg1x2))) +void svmla_za64_vg1x2(uint32_t, svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_f64_vg1x4))) +void svmla_za64_vg1x4(uint32_t, svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_f64_vg1x2))) +void svmls_za64_vg1x2(uint32_t, svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_f64_vg1x4))) +void svmls_za64_vg1x4(uint32_t, svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_f64_vg1x2))) +void svmls_lane_za64_vg1x2(uint32_t, svfloat64x2_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_f64_vg1x4))) +void svmls_lane_za64_vg1x4(uint32_t, svfloat64x4_t, svfloat64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_f64_vg1x2))) +void svmls_za64_vg1x2(uint32_t, svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_f64_vg1x4))) +void svmls_za64_vg1x4(uint32_t, svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_f64_vg1x2))) +void svsub_za64_vg1x2(uint32_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_f64_vg1x4))) +void svsub_za64_vg1x4(uint32_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za64_u64_vg1x2))) +void svadd_write_single_za64_u64_vg1x2(uint32_t, svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za64_s64_vg1x2))) +void svadd_write_single_za64_s64_vg1x2(uint32_t, svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za64_u64_vg1x4))) +void svadd_write_single_za64_u64_vg1x4(uint32_t, svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za64_s64_vg1x4))) +void svadd_write_single_za64_s64_vg1x4(uint32_t, svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za64_u64_vg1x2))) +void svadd_write_za64_u64_vg1x2(uint32_t, svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za64_s64_vg1x2))) +void svadd_write_za64_s64_vg1x2(uint32_t, svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za64_u64_vg1x4))) +void svadd_write_za64_u64_vg1x4(uint32_t, svuint64x4_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za64_s64_vg1x4))) +void svadd_write_za64_s64_vg1x4(uint32_t, svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_u64_vg1x2))) +void svadd_za64_u64_vg1x2(uint32_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_s64_vg1x2))) +void svadd_za64_s64_vg1x2(uint32_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_u64_vg1x4))) +void svadd_za64_u64_vg1x4(uint32_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_s64_vg1x4))) +void svadd_za64_s64_vg1x4(uint32_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za64_s16_vg1x2))) +void svdot_single_za64_s16_vg1x2(uint32_t, svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za64_u16_vg1x2))) +void svdot_single_za64_u16_vg1x2(uint32_t, svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za64_s16_vg1x4))) +void svdot_single_za64_s16_vg1x4(uint32_t, svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za64_u16_vg1x4))) +void svdot_single_za64_u16_vg1x4(uint32_t, svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za64_s16_vg1x2))) +void svdot_lane_za64_s16_vg1x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za64_u16_vg1x2))) +void svdot_lane_za64_u16_vg1x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za64_s16_vg1x4))) +void 
svdot_lane_za64_s16_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za64_u16_vg1x4))) +void svdot_lane_za64_u16_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za64_s16_vg1x2))) +void svdot_za64_s16_vg1x2(uint32_t, svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za64_u16_vg1x2))) +void svdot_za64_u16_vg1x2(uint32_t, svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za64_s16_vg1x4))) +void svdot_za64_s16_vg1x4(uint32_t, svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za64_u16_vg1x4))) +void svdot_za64_u16_vg1x4(uint32_t, svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_s16_vg4x2))) +void svmla_single_za64_s16_vg4x2(uint32_t, svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_u16_vg4x2))) +void svmla_single_za64_u16_vg4x2(uint32_t, svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_s16_vg4x4))) +void svmla_single_za64_s16_vg4x4(uint32_t, svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_u16_vg4x4))) +void svmla_single_za64_u16_vg4x4(uint32_t, svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_s16_vg4x1))) +void svmla_lane_za64_s16_vg4x1(uint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_u16_vg4x1))) +void svmla_lane_za64_u16_vg4x1(uint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_s16_vg4x2))) +void svmla_lane_za64_s16_vg4x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_u16_vg4x2))) +void svmla_lane_za64_u16_vg4x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_s16_vg4x4))) +void svmla_lane_za64_s16_vg4x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_u16_vg4x4))) +void svmla_lane_za64_u16_vg4x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_s16_vg4x1))) +void svmla_za64_s16_vg4x1(uint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_u16_vg4x1))) +void svmla_za64_u16_vg4x1(uint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_s16_vg4x2))) +void svmla_za64_s16_vg4x2(uint32_t, svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_u16_vg4x2))) +void svmla_za64_u16_vg4x2(uint32_t, svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_s16_vg4x4))) +void svmla_za64_s16_vg4x4(uint32_t, svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_u16_vg4x4))) +void svmla_za64_u16_vg4x4(uint32_t, svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_s16_vg4x2))) +void svmls_single_za64_s16_vg4x2(uint32_t, svint16x2_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_u16_vg4x2))) +void svmls_single_za64_u16_vg4x2(uint32_t, svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_s16_vg4x4))) +void svmls_single_za64_s16_vg4x4(uint32_t, svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_u16_vg4x4))) +void svmls_single_za64_u16_vg4x4(uint32_t, svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_s16_vg4x1))) +void svmls_lane_za64_s16_vg4x1(uint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_u16_vg4x1))) +void svmls_lane_za64_u16_vg4x1(uint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_s16_vg4x2))) +void svmls_lane_za64_s16_vg4x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_u16_vg4x2))) +void svmls_lane_za64_u16_vg4x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_s16_vg4x4))) +void svmls_lane_za64_s16_vg4x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_u16_vg4x4))) +void svmls_lane_za64_u16_vg4x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_s16_vg4x1))) +void svmls_za64_s16_vg4x1(uint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_u16_vg4x1))) +void svmls_za64_u16_vg4x1(uint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_s16_vg4x2))) +void svmls_za64_s16_vg4x2(uint32_t, svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_u16_vg4x2))) +void svmls_za64_u16_vg4x2(uint32_t, svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_s16_vg4x4))) +void svmls_za64_s16_vg4x4(uint32_t, svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_u16_vg4x4))) +void svmls_za64_u16_vg4x4(uint32_t, svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za64_u64_vg1x2))) +void svsub_write_single_za64_u64_vg1x2(uint32_t, svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za64_s64_vg1x2))) +void svsub_write_single_za64_s64_vg1x2(uint32_t, svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za64_u64_vg1x4))) +void svsub_write_single_za64_u64_vg1x4(uint32_t, svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za64_s64_vg1x4))) +void svsub_write_single_za64_s64_vg1x4(uint32_t, svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za64_u64_vg1x2))) +void svsub_write_za64_u64_vg1x2(uint32_t, svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za64_s64_vg1x2))) +void svsub_write_za64_s64_vg1x2(uint32_t, svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za64_u64_vg1x4))) +void svsub_write_za64_u64_vg1x4(uint32_t, svuint64x4_t, 
svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za64_s64_vg1x4))) +void svsub_write_za64_s64_vg1x4(uint32_t, svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_u64_vg1x2))) +void svsub_za64_u64_vg1x2(uint32_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_s64_vg1x2))) +void svsub_za64_s64_vg1x2(uint32_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_u64_vg1x4))) +void svsub_za64_u64_vg1x4(uint32_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_s64_vg1x4))) +void svsub_za64_s64_vg1x4(uint32_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_s16_vg1x4))) +void svvdot_lane_za64_s16_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_u16_vg1x4))) +void svvdot_lane_za64_u16_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za64_u64_vg1x2))) +void svadd_write_za64_vg1x2(uint32_t, svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za64_s64_vg1x2))) +void svadd_write_za64_vg1x2(uint32_t, svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za64_u64_vg1x4))) +void svadd_write_za64_vg1x4(uint32_t, svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_single_za64_s64_vg1x4))) +void svadd_write_za64_vg1x4(uint32_t, svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za64_u64_vg1x2))) +void svadd_write_za64_vg1x2(uint32_t, svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za64_s64_vg1x2))) +void svadd_write_za64_vg1x2(uint32_t, svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za64_u64_vg1x4))) +void svadd_write_za64_vg1x4(uint32_t, svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_write_za64_s64_vg1x4))) +void svadd_write_za64_vg1x4(uint32_t, svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_u64_vg1x2))) +void svadd_za64_vg1x2(uint32_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_s64_vg1x2))) +void svadd_za64_vg1x2(uint32_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_u64_vg1x4))) +void svadd_za64_vg1x4(uint32_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_s64_vg1x4))) +void svadd_za64_vg1x4(uint32_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za64_s16_vg1x2))) +void svdot_za64_vg1x2(uint32_t, svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za64_u16_vg1x2))) +void svdot_za64_vg1x2(uint32_t, svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za64_s16_vg1x4))) +void svdot_za64_vg1x4(uint32_t, svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_single_za64_u16_vg1x4))) +void svdot_za64_vg1x4(uint32_t, svuint16x4_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za64_s16_vg1x2))) +void svdot_lane_za64_vg1x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za64_u16_vg1x2))) +void svdot_lane_za64_vg1x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za64_s16_vg1x4))) +void svdot_lane_za64_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_lane_za64_u16_vg1x4))) +void svdot_lane_za64_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za64_s16_vg1x2))) +void svdot_za64_vg1x2(uint32_t, svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za64_u16_vg1x2))) +void svdot_za64_vg1x2(uint32_t, svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za64_s16_vg1x4))) +void svdot_za64_vg1x4(uint32_t, svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svdot_za64_u16_vg1x4))) +void svdot_za64_vg1x4(uint32_t, svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_s16_vg4x2))) +void svmla_za64_vg4x2(uint32_t, svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_u16_vg4x2))) +void svmla_za64_vg4x2(uint32_t, svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_s16_vg4x4))) +void svmla_za64_vg4x4(uint32_t, svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za64_u16_vg4x4))) +void svmla_za64_vg4x4(uint32_t, svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_s16_vg4x1))) +void svmla_lane_za64_vg4x1(uint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_u16_vg4x1))) +void svmla_lane_za64_vg4x1(uint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_s16_vg4x2))) +void svmla_lane_za64_vg4x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_u16_vg4x2))) +void svmla_lane_za64_vg4x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_s16_vg4x4))) +void svmla_lane_za64_vg4x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za64_u16_vg4x4))) +void svmla_lane_za64_vg4x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_s16_vg4x1))) +void svmla_za64_vg4x1(uint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_u16_vg4x1))) +void svmla_za64_vg4x1(uint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_s16_vg4x2))) +void svmla_za64_vg4x2(uint32_t, svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_u16_vg4x2))) +void svmla_za64_vg4x2(uint32_t, svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_s16_vg4x4))) +void svmla_za64_vg4x4(uint32_t, svint16x4_t, 
svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za64_u16_vg4x4))) +void svmla_za64_vg4x4(uint32_t, svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_s16_vg4x2))) +void svmls_za64_vg4x2(uint32_t, svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_u16_vg4x2))) +void svmls_za64_vg4x2(uint32_t, svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_s16_vg4x4))) +void svmls_za64_vg4x4(uint32_t, svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za64_u16_vg4x4))) +void svmls_za64_vg4x4(uint32_t, svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_s16_vg4x1))) +void svmls_lane_za64_vg4x1(uint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_u16_vg4x1))) +void svmls_lane_za64_vg4x1(uint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_s16_vg4x2))) +void svmls_lane_za64_vg4x2(uint32_t, svint16x2_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_u16_vg4x2))) +void svmls_lane_za64_vg4x2(uint32_t, svuint16x2_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_s16_vg4x4))) +void svmls_lane_za64_vg4x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za64_u16_vg4x4))) +void svmls_lane_za64_vg4x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_s16_vg4x1))) +void svmls_za64_vg4x1(uint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_u16_vg4x1))) +void svmls_za64_vg4x1(uint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_s16_vg4x2))) +void svmls_za64_vg4x2(uint32_t, svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_u16_vg4x2))) +void svmls_za64_vg4x2(uint32_t, svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_s16_vg4x4))) +void svmls_za64_vg4x4(uint32_t, svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za64_u16_vg4x4))) +void svmls_za64_vg4x4(uint32_t, svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za64_u64_vg1x2))) +void svsub_write_za64_vg1x2(uint32_t, svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za64_s64_vg1x2))) +void svsub_write_za64_vg1x2(uint32_t, svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za64_u64_vg1x4))) +void svsub_write_za64_vg1x4(uint32_t, svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_single_za64_s64_vg1x4))) +void svsub_write_za64_vg1x4(uint32_t, svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za64_u64_vg1x2))) +void svsub_write_za64_vg1x2(uint32_t, svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za64_s64_vg1x2))) +void 
svsub_write_za64_vg1x2(uint32_t, svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za64_u64_vg1x4))) +void svsub_write_za64_vg1x4(uint32_t, svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_write_za64_s64_vg1x4))) +void svsub_write_za64_vg1x4(uint32_t, svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_u64_vg1x2))) +void svsub_za64_vg1x2(uint32_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_s64_vg1x2))) +void svsub_za64_vg1x2(uint32_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_u64_vg1x4))) +void svsub_za64_vg1x4(uint32_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za64_s64_vg1x4))) +void svsub_za64_vg1x4(uint32_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_s16_vg1x4))) +void svvdot_lane_za64_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_u16_vg1x4))) +void svvdot_lane_za64_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u8))) +svuint8_t svreadz_hor_za128_u8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u32))) +svuint32_t svreadz_hor_za128_u32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u64))) +svuint64_t svreadz_hor_za128_u64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u16))) +svuint16_t svreadz_hor_za128_u16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_bf16))) +svbfloat16_t svreadz_hor_za128_bf16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s8))) +svint8_t svreadz_hor_za128_s8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_f64))) +svfloat64_t svreadz_hor_za128_f64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_f32))) +svfloat32_t svreadz_hor_za128_f32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_f16))) +svfloat16_t svreadz_hor_za128_f16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s32))) +svint32_t svreadz_hor_za128_s32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s64))) +svint64_t svreadz_hor_za128_s64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s16))) +svint16_t svreadz_hor_za128_s16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16))) +svuint16_t svreadz_hor_za16_u16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_bf16))) +svbfloat16_t svreadz_hor_za16_bf16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_f16))) +svfloat16_t svreadz_hor_za16_f16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_s16))) +svint16_t svreadz_hor_za16_s16(uint64_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16_vg2))) +svuint16x2_t svreadz_hor_za16_u16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_bf16_vg2))) +svbfloat16x2_t svreadz_hor_za16_bf16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_f16_vg2))) +svfloat16x2_t svreadz_hor_za16_f16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_s16_vg2))) +svint16x2_t svreadz_hor_za16_s16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16_vg4))) +svuint16x4_t svreadz_hor_za16_u16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_bf16_vg4))) +svbfloat16x4_t svreadz_hor_za16_bf16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_f16_vg4))) +svfloat16x4_t svreadz_hor_za16_f16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_s16_vg4))) +svint16x4_t svreadz_hor_za16_s16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_u32))) +svuint32_t svreadz_hor_za32_u32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_f32))) +svfloat32_t svreadz_hor_za32_f32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_s32))) +svint32_t svreadz_hor_za32_s32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_u32_vg2))) +svuint32x2_t svreadz_hor_za32_u32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_f32_vg2))) +svfloat32x2_t svreadz_hor_za32_f32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_s32_vg2))) +svint32x2_t svreadz_hor_za32_s32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_u32_vg4))) +svuint32x4_t svreadz_hor_za32_u32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_f32_vg4))) +svfloat32x4_t svreadz_hor_za32_f32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_s32_vg4))) +svint32x4_t svreadz_hor_za32_s32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_u64))) +svuint64_t svreadz_hor_za64_u64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_f64))) +svfloat64_t svreadz_hor_za64_f64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_s64))) +svint64_t svreadz_hor_za64_s64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_u64_vg2))) +svuint64x2_t svreadz_hor_za64_u64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_f64_vg2))) +svfloat64x2_t svreadz_hor_za64_f64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_s64_vg2))) +svint64x2_t svreadz_hor_za64_s64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_u64_vg4))) +svuint64x4_t svreadz_hor_za64_u64_vg4(uint64_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_f64_vg4))) +svfloat64x4_t svreadz_hor_za64_f64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_s64_vg4))) +svint64x4_t svreadz_hor_za64_s64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8))) +svuint8_t svreadz_hor_za8_u8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8))) +svint8_t svreadz_hor_za8_s8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg2))) +svuint8x2_t svreadz_hor_za8_u8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg2))) +svint8x2_t svreadz_hor_za8_s8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg4))) +svuint8x4_t svreadz_hor_za8_u8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg4))) +svint8x4_t svreadz_hor_za8_s8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u8))) +svuint8_t svreadz_ver_za128_u8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u32))) +svuint32_t svreadz_ver_za128_u32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u64))) +svuint64_t svreadz_ver_za128_u64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u16))) +svuint16_t svreadz_ver_za128_u16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_bf16))) +svbfloat16_t svreadz_ver_za128_bf16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s8))) +svint8_t svreadz_ver_za128_s8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_f64))) +svfloat64_t svreadz_ver_za128_f64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_f32))) +svfloat32_t svreadz_ver_za128_f32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_f16))) +svfloat16_t svreadz_ver_za128_f16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s32))) +svint32_t svreadz_ver_za128_s32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s64))) +svint64_t svreadz_ver_za128_s64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s16))) +svint16_t svreadz_ver_za128_s16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16))) +svuint16_t svreadz_ver_za16_u16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_bf16))) +svbfloat16_t svreadz_ver_za16_bf16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_f16))) +svfloat16_t svreadz_ver_za16_f16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_s16))) +svint16_t svreadz_ver_za16_s16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16_vg2))) +svuint16x2_t svreadz_ver_za16_u16_vg2(uint64_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_bf16_vg2))) +svbfloat16x2_t svreadz_ver_za16_bf16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_f16_vg2))) +svfloat16x2_t svreadz_ver_za16_f16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_s16_vg2))) +svint16x2_t svreadz_ver_za16_s16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16_vg4))) +svuint16x4_t svreadz_ver_za16_u16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_bf16_vg4))) +svbfloat16x4_t svreadz_ver_za16_bf16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_f16_vg4))) +svfloat16x4_t svreadz_ver_za16_f16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_s16_vg4))) +svint16x4_t svreadz_ver_za16_s16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_u32))) +svuint32_t svreadz_ver_za32_u32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_f32))) +svfloat32_t svreadz_ver_za32_f32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_s32))) +svint32_t svreadz_ver_za32_s32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_u32_vg2))) +svuint32x2_t svreadz_ver_za32_u32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_f32_vg2))) +svfloat32x2_t svreadz_ver_za32_f32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_s32_vg2))) +svint32x2_t svreadz_ver_za32_s32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_u32_vg4))) +svuint32x4_t svreadz_ver_za32_u32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_f32_vg4))) +svfloat32x4_t svreadz_ver_za32_f32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_s32_vg4))) +svint32x4_t svreadz_ver_za32_s32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_u64))) +svuint64_t svreadz_ver_za64_u64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_f64))) +svfloat64_t svreadz_ver_za64_f64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_s64))) +svint64_t svreadz_ver_za64_s64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_u64_vg2))) +svuint64x2_t svreadz_ver_za64_u64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_f64_vg2))) +svfloat64x2_t svreadz_ver_za64_f64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_s64_vg2))) +svint64x2_t svreadz_ver_za64_s64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_u64_vg4))) +svuint64x4_t svreadz_ver_za64_u64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_f64_vg4))) +svfloat64x4_t svreadz_ver_za64_f64_vg4(uint64_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_s64_vg4))) +svint64x4_t svreadz_ver_za64_s64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8))) +svuint8_t svreadz_ver_za8_u8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8))) +svint8_t svreadz_ver_za8_s8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg2))) +svuint8x2_t svreadz_ver_za8_u8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg2))) +svint8x2_t svreadz_ver_za8_s8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg4))) +svuint8x4_t svreadz_ver_za8_u8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg4))) +svint8x4_t svreadz_ver_za8_s8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_u16_vg1x2))) +svuint16x2_t svreadz_za16_u16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_bf16_vg1x2))) +svbfloat16x2_t svreadz_za16_bf16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_f16_vg1x2))) +svfloat16x2_t svreadz_za16_f16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_s16_vg1x2))) +svint16x2_t svreadz_za16_s16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_u16_vg1x4))) +svuint16x4_t svreadz_za16_u16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_bf16_vg1x4))) +svbfloat16x4_t svreadz_za16_bf16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_f16_vg1x4))) +svfloat16x4_t svreadz_za16_f16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_s16_vg1x4))) +svint16x4_t svreadz_za16_s16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_u32_vg1x2))) +svuint32x2_t svreadz_za32_u32_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_f32_vg1x2))) +svfloat32x2_t svreadz_za32_f32_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_s32_vg1x2))) +svint32x2_t svreadz_za32_s32_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_u32_vg1x4))) +svuint32x4_t svreadz_za32_u32_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_f32_vg1x4))) +svfloat32x4_t svreadz_za32_f32_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_s32_vg1x4))) +svint32x4_t svreadz_za32_s32_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_u64_vg1x2))) +svuint64x2_t svreadz_za64_u64_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_f64_vg1x2))) +svfloat64x2_t svreadz_za64_f64_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_s64_vg1x2))) +svint64x2_t svreadz_za64_s64_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_u64_vg1x4))) +svuint64x4_t svreadz_za64_u64_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_f64_vg1x4))) +svfloat64x4_t 
svreadz_za64_f64_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_s64_vg1x4)))
+svint64x4_t svreadz_za64_s64_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x2)))
+svuint8x2_t svreadz_za8_u8_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x2)))
+svint8x2_t svreadz_za8_s8_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x4)))
+svuint8x4_t svreadz_za8_u8_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x4)))
+svint8x4_t svreadz_za8_s8_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x2)))
+void svzero_za64_vg1x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x4)))
+void svzero_za64_vg1x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg2x1)))
+void svzero_za64_vg2x1(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg2x2)))
+void svzero_za64_vg2x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg2x4)))
+void svzero_za64_vg2x4(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg4x1)))
+void svzero_za64_vg4x1(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg4x2)))
+void svzero_za64_vg4x2(uint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg4x4)))
+void svzero_za64_vg4x4(uint32_t);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#undef __ai
+
+#endif /* __ARM_SME_H */
diff --git a/third_party/aarch64/clang/arm_sve.h b/third_party/aarch64/clang/arm_sve.h
new file mode 100644
index 000000000..f9aa68374
--- /dev/null
+++ b/third_party/aarch64/clang/arm_sve.h
@@ -0,0 +1,30537 @@
+/*===---- arm_sve.h - ARM SVE intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_SVE_H
+#define __ARM_SVE_H
+
+#if !defined(__LITTLE_ENDIAN__)
+#error "Big endian is currently not supported for arm_sve.h"
+#endif
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#else
+#include
+#endif
+
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef double float64_t;
+typedef __SVInt8_t svint8_t;
+typedef __SVInt16_t svint16_t;
+typedef __SVInt32_t svint32_t;
+typedef __SVInt64_t svint64_t;
+typedef __SVUint8_t svuint8_t;
+typedef __SVUint16_t svuint16_t;
+typedef __SVUint32_t svuint32_t;
+typedef __SVUint64_t svuint64_t;
+typedef __SVFloat16_t svfloat16_t;
+
+typedef __SVBfloat16_t svbfloat16_t;
+#include
+#include
+typedef __SVFloat32_t svfloat32_t;
+typedef __SVFloat64_t svfloat64_t;
+typedef __clang_svint8x2_t svint8x2_t;
+typedef __clang_svint16x2_t svint16x2_t;
+typedef __clang_svint32x2_t svint32x2_t;
+typedef __clang_svint64x2_t svint64x2_t;
+typedef __clang_svuint8x2_t svuint8x2_t;
+typedef __clang_svuint16x2_t svuint16x2_t;
+typedef __clang_svuint32x2_t svuint32x2_t;
+typedef __clang_svuint64x2_t svuint64x2_t;
+typedef __clang_svfloat16x2_t svfloat16x2_t;
+typedef __clang_svfloat32x2_t svfloat32x2_t;
+typedef __clang_svfloat64x2_t svfloat64x2_t;
+typedef __clang_svint8x3_t svint8x3_t;
+typedef __clang_svint16x3_t svint16x3_t;
+typedef __clang_svint32x3_t svint32x3_t;
+typedef __clang_svint64x3_t svint64x3_t;
+typedef __clang_svuint8x3_t svuint8x3_t;
+typedef __clang_svuint16x3_t svuint16x3_t;
+typedef __clang_svuint32x3_t svuint32x3_t;
+typedef __clang_svuint64x3_t svuint64x3_t;
+typedef __clang_svfloat16x3_t svfloat16x3_t;
+typedef __clang_svfloat32x3_t svfloat32x3_t;
+typedef __clang_svfloat64x3_t svfloat64x3_t;
+typedef __clang_svint8x4_t svint8x4_t;
+typedef __clang_svint16x4_t svint16x4_t;
+typedef __clang_svint32x4_t svint32x4_t;
+typedef __clang_svint64x4_t svint64x4_t;
+typedef __clang_svuint8x4_t svuint8x4_t;
+typedef __clang_svuint16x4_t svuint16x4_t;
+typedef __clang_svuint32x4_t svuint32x4_t;
+typedef __clang_svuint64x4_t svuint64x4_t;
+typedef __clang_svfloat16x4_t svfloat16x4_t;
+typedef __clang_svfloat32x4_t svfloat32x4_t;
+typedef __clang_svfloat64x4_t svfloat64x4_t;
+typedef __SVBool_t svbool_t;
+typedef __clang_svboolx2_t svboolx2_t;
+typedef __clang_svboolx4_t svboolx4_t;
+
+typedef __clang_svbfloat16x2_t svbfloat16x2_t;
+typedef __clang_svbfloat16x3_t svbfloat16x3_t;
+typedef __clang_svbfloat16x4_t svbfloat16x4_t;
+typedef __SVCount_t svcount_t;
+
+enum svpattern
+{
+  SV_POW2 = 0,
+  SV_VL1 = 1,
+  SV_VL2 = 2,
+  SV_VL3 = 3,
+  SV_VL4 = 4,
+  SV_VL5 = 5,
+  SV_VL6 = 6,
+  SV_VL7 = 7,
+  SV_VL8 = 8,
+  SV_VL16 = 9,
+  SV_VL32 = 10,
+  SV_VL64 = 11,
+  SV_VL128 = 12,
+  SV_VL256 = 13,
+  SV_MUL4 = 29,
+  SV_MUL3 = 30,
+  SV_ALL = 31
+};
+
+enum svprfop
+{
+  SV_PLDL1KEEP = 0,
+  SV_PLDL1STRM = 1,
+  SV_PLDL2KEEP = 2,
+  SV_PLDL2STRM = 3,
+  SV_PLDL3KEEP = 4,
+  SV_PLDL3STRM = 5,
+  SV_PSTL1KEEP = 8,
+  SV_PSTL1STRM = 9,
+  SV_PSTL2KEEP = 10,
+  SV_PSTL2STRM = 11,
+  SV_PSTL3KEEP = 12,
+  SV_PSTL3STRM = 13
+};
+
+/* Function attributes */
+#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
+
+#define __aio static __inline__ __attribute__((__always_inline__, __nodebug__, __overloadable__))
+
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8)))
+svint8_t svreinterpret_s8_s8(svint8_t op);
+__aio
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8))) +svint8_t svreinterpret_s8_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16))) +svint8_t svreinterpret_s8_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16))) +svint8_t svreinterpret_s8_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32))) +svint8_t svreinterpret_s8_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32))) +svint8_t svreinterpret_s8_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64))) +svint8_t svreinterpret_s8_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64))) +svint8_t svreinterpret_s8_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16))) +svint8_t svreinterpret_s8_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16))) +svint8_t svreinterpret_s8_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32))) +svint8_t svreinterpret_s8_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64))) +svint8_t svreinterpret_s8_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8))) +svuint8_t svreinterpret_u8_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8))) +svuint8_t svreinterpret_u8_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16))) +svuint8_t svreinterpret_u8_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16))) +svuint8_t svreinterpret_u8_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32))) +svuint8_t svreinterpret_u8_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32))) +svuint8_t svreinterpret_u8_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64))) +svuint8_t svreinterpret_u8_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64))) +svuint8_t svreinterpret_u8_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16))) +svuint8_t svreinterpret_u8_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16))) +svuint8_t svreinterpret_u8_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32))) +svuint8_t svreinterpret_u8_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64))) +svuint8_t svreinterpret_u8_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8))) +svint16_t svreinterpret_s16_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8))) +svint16_t svreinterpret_s16_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16))) +svint16_t svreinterpret_s16_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16))) +svint16_t svreinterpret_s16_u16(svuint16_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32))) +svint16_t svreinterpret_s16_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32))) +svint16_t svreinterpret_s16_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64))) +svint16_t svreinterpret_s16_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64))) +svint16_t svreinterpret_s16_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16))) +svint16_t svreinterpret_s16_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16))) +svint16_t svreinterpret_s16_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32))) +svint16_t svreinterpret_s16_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64))) +svint16_t svreinterpret_s16_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8))) +svuint16_t svreinterpret_u16_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8))) +svuint16_t svreinterpret_u16_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16))) +svuint16_t svreinterpret_u16_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16))) +svuint16_t svreinterpret_u16_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32))) +svuint16_t svreinterpret_u16_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32))) +svuint16_t svreinterpret_u16_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64))) +svuint16_t svreinterpret_u16_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64))) +svuint16_t svreinterpret_u16_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16))) +svuint16_t svreinterpret_u16_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16))) +svuint16_t svreinterpret_u16_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32))) +svuint16_t svreinterpret_u16_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64))) +svuint16_t svreinterpret_u16_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8))) +svint32_t svreinterpret_s32_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8))) +svint32_t svreinterpret_s32_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16))) +svint32_t svreinterpret_s32_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16))) +svint32_t svreinterpret_s32_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32))) +svint32_t svreinterpret_s32_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32))) +svint32_t svreinterpret_s32_u32(svuint32_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64))) +svint32_t svreinterpret_s32_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64))) +svint32_t svreinterpret_s32_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16))) +svint32_t svreinterpret_s32_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16))) +svint32_t svreinterpret_s32_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32))) +svint32_t svreinterpret_s32_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64))) +svint32_t svreinterpret_s32_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8))) +svuint32_t svreinterpret_u32_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8))) +svuint32_t svreinterpret_u32_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16))) +svuint32_t svreinterpret_u32_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16))) +svuint32_t svreinterpret_u32_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32))) +svuint32_t svreinterpret_u32_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32))) +svuint32_t svreinterpret_u32_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64))) +svuint32_t svreinterpret_u32_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64))) +svuint32_t svreinterpret_u32_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16))) +svuint32_t svreinterpret_u32_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16))) +svuint32_t svreinterpret_u32_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32))) +svuint32_t svreinterpret_u32_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64))) +svuint32_t svreinterpret_u32_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8))) +svint64_t svreinterpret_s64_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8))) +svint64_t svreinterpret_s64_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16))) +svint64_t svreinterpret_s64_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16))) +svint64_t svreinterpret_s64_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32))) +svint64_t svreinterpret_s64_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32))) +svint64_t svreinterpret_s64_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64))) +svint64_t svreinterpret_s64_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64))) +svint64_t svreinterpret_s64_u64(svuint64_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16))) +svint64_t svreinterpret_s64_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16))) +svint64_t svreinterpret_s64_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32))) +svint64_t svreinterpret_s64_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64))) +svint64_t svreinterpret_s64_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8))) +svuint64_t svreinterpret_u64_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8))) +svuint64_t svreinterpret_u64_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16))) +svuint64_t svreinterpret_u64_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16))) +svuint64_t svreinterpret_u64_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32))) +svuint64_t svreinterpret_u64_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32))) +svuint64_t svreinterpret_u64_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64))) +svuint64_t svreinterpret_u64_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64))) +svuint64_t svreinterpret_u64_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16))) +svuint64_t svreinterpret_u64_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16))) +svuint64_t svreinterpret_u64_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32))) +svuint64_t svreinterpret_u64_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64))) +svuint64_t svreinterpret_u64_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8))) +svfloat16_t svreinterpret_f16_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8))) +svfloat16_t svreinterpret_f16_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16))) +svfloat16_t svreinterpret_f16_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16))) +svfloat16_t svreinterpret_f16_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32))) +svfloat16_t svreinterpret_f16_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32))) +svfloat16_t svreinterpret_f16_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64))) +svfloat16_t svreinterpret_f16_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64))) +svfloat16_t svreinterpret_f16_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16))) +svfloat16_t svreinterpret_f16_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16))) +svfloat16_t svreinterpret_f16_bf16(svbfloat16_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32))) +svfloat16_t svreinterpret_f16_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64))) +svfloat16_t svreinterpret_f16_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8))) +svbfloat16_t svreinterpret_bf16_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8))) +svbfloat16_t svreinterpret_bf16_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16))) +svbfloat16_t svreinterpret_bf16_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16))) +svbfloat16_t svreinterpret_bf16_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32))) +svbfloat16_t svreinterpret_bf16_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32))) +svbfloat16_t svreinterpret_bf16_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64))) +svbfloat16_t svreinterpret_bf16_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64))) +svbfloat16_t svreinterpret_bf16_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16))) +svbfloat16_t svreinterpret_bf16_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16))) +svbfloat16_t svreinterpret_bf16_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32))) +svbfloat16_t svreinterpret_bf16_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64))) +svbfloat16_t svreinterpret_bf16_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8))) +svfloat32_t svreinterpret_f32_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8))) +svfloat32_t svreinterpret_f32_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16))) +svfloat32_t svreinterpret_f32_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16))) +svfloat32_t svreinterpret_f32_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32))) +svfloat32_t svreinterpret_f32_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32))) +svfloat32_t svreinterpret_f32_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64))) +svfloat32_t svreinterpret_f32_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64))) +svfloat32_t svreinterpret_f32_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16))) +svfloat32_t svreinterpret_f32_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16))) +svfloat32_t svreinterpret_f32_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32))) +svfloat32_t svreinterpret_f32_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64))) +svfloat32_t 
svreinterpret_f32_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8))) +svfloat64_t svreinterpret_f64_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8))) +svfloat64_t svreinterpret_f64_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16))) +svfloat64_t svreinterpret_f64_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16))) +svfloat64_t svreinterpret_f64_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32))) +svfloat64_t svreinterpret_f64_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32))) +svfloat64_t svreinterpret_f64_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64))) +svfloat64_t svreinterpret_f64_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64))) +svfloat64_t svreinterpret_f64_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16))) +svfloat64_t svreinterpret_f64_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16))) +svfloat64_t svreinterpret_f64_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32))) +svfloat64_t svreinterpret_f64_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64))) +svfloat64_t svreinterpret_f64_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8))) +svint8_t svreinterpret_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8))) +svint8_t svreinterpret_s8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16))) +svint8_t svreinterpret_s8(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16))) +svint8_t svreinterpret_s8(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32))) +svint8_t svreinterpret_s8(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32))) +svint8_t svreinterpret_s8(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64))) +svint8_t svreinterpret_s8(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64))) +svint8_t svreinterpret_s8(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16))) +svint8_t svreinterpret_s8(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16))) +svint8_t svreinterpret_s8(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32))) +svint8_t svreinterpret_s8(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64))) +svint8_t svreinterpret_s8(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8))) +svuint8_t svreinterpret_u8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8))) +svuint8_t svreinterpret_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16))) +svuint8_t 
svreinterpret_u8(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16))) +svuint8_t svreinterpret_u8(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32))) +svuint8_t svreinterpret_u8(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32))) +svuint8_t svreinterpret_u8(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64))) +svuint8_t svreinterpret_u8(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64))) +svuint8_t svreinterpret_u8(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16))) +svuint8_t svreinterpret_u8(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16))) +svuint8_t svreinterpret_u8(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32))) +svuint8_t svreinterpret_u8(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64))) +svuint8_t svreinterpret_u8(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8))) +svint16_t svreinterpret_s16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8))) +svint16_t svreinterpret_s16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16))) +svint16_t svreinterpret_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16))) +svint16_t svreinterpret_s16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32))) +svint16_t svreinterpret_s16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32))) +svint16_t svreinterpret_s16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64))) +svint16_t svreinterpret_s16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64))) +svint16_t svreinterpret_s16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16))) +svint16_t svreinterpret_s16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16))) +svint16_t svreinterpret_s16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32))) +svint16_t svreinterpret_s16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64))) +svint16_t svreinterpret_s16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8))) +svuint16_t svreinterpret_u16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8))) +svuint16_t svreinterpret_u16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16))) +svuint16_t svreinterpret_u16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16))) +svuint16_t svreinterpret_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32))) +svuint16_t svreinterpret_u16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32))) +svuint16_t svreinterpret_u16(svuint32_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64))) +svuint16_t svreinterpret_u16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64))) +svuint16_t svreinterpret_u16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16))) +svuint16_t svreinterpret_u16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16))) +svuint16_t svreinterpret_u16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32))) +svuint16_t svreinterpret_u16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64))) +svuint16_t svreinterpret_u16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8))) +svint32_t svreinterpret_s32(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8))) +svint32_t svreinterpret_s32(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16))) +svint32_t svreinterpret_s32(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16))) +svint32_t svreinterpret_s32(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32))) +svint32_t svreinterpret_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32))) +svint32_t svreinterpret_s32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64))) +svint32_t svreinterpret_s32(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64))) +svint32_t svreinterpret_s32(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16))) +svint32_t svreinterpret_s32(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16))) +svint32_t svreinterpret_s32(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32))) +svint32_t svreinterpret_s32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64))) +svint32_t svreinterpret_s32(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8))) +svuint32_t svreinterpret_u32(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8))) +svuint32_t svreinterpret_u32(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16))) +svuint32_t svreinterpret_u32(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16))) +svuint32_t svreinterpret_u32(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32))) +svuint32_t svreinterpret_u32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32))) +svuint32_t svreinterpret_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64))) +svuint32_t svreinterpret_u32(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64))) +svuint32_t svreinterpret_u32(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16))) +svuint32_t svreinterpret_u32(svfloat16_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16))) +svuint32_t svreinterpret_u32(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32))) +svuint32_t svreinterpret_u32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64))) +svuint32_t svreinterpret_u32(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8))) +svint64_t svreinterpret_s64(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8))) +svint64_t svreinterpret_s64(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16))) +svint64_t svreinterpret_s64(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16))) +svint64_t svreinterpret_s64(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32))) +svint64_t svreinterpret_s64(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32))) +svint64_t svreinterpret_s64(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64))) +svint64_t svreinterpret_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64))) +svint64_t svreinterpret_s64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16))) +svint64_t svreinterpret_s64(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16))) +svint64_t svreinterpret_s64(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32))) +svint64_t svreinterpret_s64(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64))) +svint64_t svreinterpret_s64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8))) +svuint64_t svreinterpret_u64(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8))) +svuint64_t svreinterpret_u64(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16))) +svuint64_t svreinterpret_u64(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16))) +svuint64_t svreinterpret_u64(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32))) +svuint64_t svreinterpret_u64(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32))) +svuint64_t svreinterpret_u64(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64))) +svuint64_t svreinterpret_u64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64))) +svuint64_t svreinterpret_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16))) +svuint64_t svreinterpret_u64(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16))) +svuint64_t svreinterpret_u64(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32))) +svuint64_t svreinterpret_u64(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64))) +svuint64_t svreinterpret_u64(svfloat64_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8))) +svfloat16_t svreinterpret_f16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8))) +svfloat16_t svreinterpret_f16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16))) +svfloat16_t svreinterpret_f16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16))) +svfloat16_t svreinterpret_f16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32))) +svfloat16_t svreinterpret_f16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32))) +svfloat16_t svreinterpret_f16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64))) +svfloat16_t svreinterpret_f16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64))) +svfloat16_t svreinterpret_f16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16))) +svfloat16_t svreinterpret_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16))) +svfloat16_t svreinterpret_f16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32))) +svfloat16_t svreinterpret_f16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64))) +svfloat16_t svreinterpret_f16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8))) +svbfloat16_t svreinterpret_bf16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8))) +svbfloat16_t svreinterpret_bf16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16))) +svbfloat16_t svreinterpret_bf16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16))) +svbfloat16_t svreinterpret_bf16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32))) +svbfloat16_t svreinterpret_bf16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32))) +svbfloat16_t svreinterpret_bf16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64))) +svbfloat16_t svreinterpret_bf16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64))) +svbfloat16_t svreinterpret_bf16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16))) +svbfloat16_t svreinterpret_bf16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16))) +svbfloat16_t svreinterpret_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32))) +svbfloat16_t svreinterpret_bf16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64))) +svbfloat16_t svreinterpret_bf16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8))) +svfloat32_t svreinterpret_f32(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8))) +svfloat32_t svreinterpret_f32(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16))) +svfloat32_t 
svreinterpret_f32(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16))) +svfloat32_t svreinterpret_f32(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32))) +svfloat32_t svreinterpret_f32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32))) +svfloat32_t svreinterpret_f32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64))) +svfloat32_t svreinterpret_f32(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64))) +svfloat32_t svreinterpret_f32(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16))) +svfloat32_t svreinterpret_f32(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16))) +svfloat32_t svreinterpret_f32(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32))) +svfloat32_t svreinterpret_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64))) +svfloat32_t svreinterpret_f32(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8))) +svfloat64_t svreinterpret_f64(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8))) +svfloat64_t svreinterpret_f64(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16))) +svfloat64_t svreinterpret_f64(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16))) +svfloat64_t svreinterpret_f64(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32))) +svfloat64_t svreinterpret_f64(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32))) +svfloat64_t svreinterpret_f64(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64))) +svfloat64_t svreinterpret_f64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64))) +svfloat64_t svreinterpret_f64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16))) +svfloat64_t svreinterpret_f64(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16))) +svfloat64_t svreinterpret_f64(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32))) +svfloat64_t svreinterpret_f64(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64))) +svfloat64_t svreinterpret_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x2))) +svint8x2_t svreinterpret_s8_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x2))) +svint8x2_t svreinterpret_s8_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x2))) +svint8x2_t svreinterpret_s8_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x2))) +svint8x2_t svreinterpret_s8_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x2))) +svint8x2_t svreinterpret_s8_s32_x2(svint32x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x2))) +svint8x2_t svreinterpret_s8_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x2))) +svint8x2_t svreinterpret_s8_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x2))) +svint8x2_t svreinterpret_s8_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x2))) +svint8x2_t svreinterpret_s8_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x2))) +svint8x2_t svreinterpret_s8_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x2))) +svint8x2_t svreinterpret_s8_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x2))) +svint8x2_t svreinterpret_s8_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x2))) +svuint8x2_t svreinterpret_u8_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x2))) +svuint8x2_t svreinterpret_u8_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x2))) +svuint8x2_t svreinterpret_u8_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x2))) +svuint8x2_t svreinterpret_u8_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x2))) +svuint8x2_t svreinterpret_u8_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x2))) +svuint8x2_t svreinterpret_u8_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x2))) +svuint8x2_t svreinterpret_u8_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x2))) +svuint8x2_t svreinterpret_u8_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x2))) +svuint8x2_t svreinterpret_u8_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x2))) +svuint8x2_t svreinterpret_u8_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x2))) +svuint8x2_t svreinterpret_u8_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x2))) +svuint8x2_t svreinterpret_u8_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x2))) +svint16x2_t svreinterpret_s16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x2))) +svint16x2_t svreinterpret_s16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x2))) +svint16x2_t svreinterpret_s16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x2))) +svint16x2_t svreinterpret_s16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x2))) +svint16x2_t svreinterpret_s16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x2))) +svint16x2_t svreinterpret_s16_u32_x2(svuint32x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x2))) +svint16x2_t svreinterpret_s16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x2))) +svint16x2_t svreinterpret_s16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x2))) +svint16x2_t svreinterpret_s16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x2))) +svint16x2_t svreinterpret_s16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x2))) +svint16x2_t svreinterpret_s16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x2))) +svint16x2_t svreinterpret_s16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x2))) +svuint16x2_t svreinterpret_u16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x2))) +svuint16x2_t svreinterpret_u16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x2))) +svuint16x2_t svreinterpret_u16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x2))) +svuint16x2_t svreinterpret_u16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x2))) +svuint16x2_t svreinterpret_u16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x2))) +svuint16x2_t svreinterpret_u16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x2))) +svuint16x2_t svreinterpret_u16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x2))) +svuint16x2_t svreinterpret_u16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x2))) +svuint16x2_t svreinterpret_u16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x2))) +svuint16x2_t svreinterpret_u16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x2))) +svuint16x2_t svreinterpret_u16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x2))) +svuint16x2_t svreinterpret_u16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x2))) +svint32x2_t svreinterpret_s32_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x2))) +svint32x2_t svreinterpret_s32_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x2))) +svint32x2_t svreinterpret_s32_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x2))) +svint32x2_t svreinterpret_s32_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x2))) +svint32x2_t svreinterpret_s32_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x2))) +svint32x2_t svreinterpret_s32_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x2))) 
+svint32x2_t svreinterpret_s32_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x2))) +svint32x2_t svreinterpret_s32_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x2))) +svint32x2_t svreinterpret_s32_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x2))) +svint32x2_t svreinterpret_s32_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x2))) +svint32x2_t svreinterpret_s32_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x2))) +svint32x2_t svreinterpret_s32_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x2))) +svuint32x2_t svreinterpret_u32_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x2))) +svuint32x2_t svreinterpret_u32_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x2))) +svuint32x2_t svreinterpret_u32_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x2))) +svuint32x2_t svreinterpret_u32_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x2))) +svuint32x2_t svreinterpret_u32_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x2))) +svuint32x2_t svreinterpret_u32_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x2))) +svuint32x2_t svreinterpret_u32_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x2))) +svuint32x2_t svreinterpret_u32_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x2))) +svuint32x2_t svreinterpret_u32_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x2))) +svuint32x2_t svreinterpret_u32_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x2))) +svuint32x2_t svreinterpret_u32_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x2))) +svuint32x2_t svreinterpret_u32_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x2))) +svint64x2_t svreinterpret_s64_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x2))) +svint64x2_t svreinterpret_s64_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x2))) +svint64x2_t svreinterpret_s64_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x2))) +svint64x2_t svreinterpret_s64_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x2))) +svint64x2_t svreinterpret_s64_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x2))) +svint64x2_t svreinterpret_s64_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x2))) +svint64x2_t svreinterpret_s64_s64_x2(svint64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x2))) +svint64x2_t svreinterpret_s64_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x2))) +svint64x2_t svreinterpret_s64_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x2))) +svint64x2_t svreinterpret_s64_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x2))) +svint64x2_t svreinterpret_s64_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x2))) +svint64x2_t svreinterpret_s64_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x2))) +svuint64x2_t svreinterpret_u64_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x2))) +svuint64x2_t svreinterpret_u64_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x2))) +svuint64x2_t svreinterpret_u64_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x2))) +svuint64x2_t svreinterpret_u64_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x2))) +svuint64x2_t svreinterpret_u64_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x2))) +svuint64x2_t svreinterpret_u64_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x2))) +svuint64x2_t svreinterpret_u64_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x2))) +svuint64x2_t svreinterpret_u64_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x2))) +svuint64x2_t svreinterpret_u64_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x2))) +svuint64x2_t svreinterpret_u64_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x2))) +svuint64x2_t svreinterpret_u64_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x2))) +svuint64x2_t svreinterpret_u64_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x2))) +svfloat16x2_t svreinterpret_f16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x2))) +svfloat16x2_t svreinterpret_f16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x2))) +svfloat16x2_t svreinterpret_f16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x2))) +svfloat16x2_t svreinterpret_f16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x2))) +svfloat16x2_t svreinterpret_f16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x2))) +svfloat16x2_t svreinterpret_f16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x2))) +svfloat16x2_t svreinterpret_f16_s64_x2(svint64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x2))) +svfloat16x2_t svreinterpret_f16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x2))) +svfloat16x2_t svreinterpret_f16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x2))) +svfloat16x2_t svreinterpret_f16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x2))) +svfloat16x2_t svreinterpret_f16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x2))) +svfloat16x2_t svreinterpret_f16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x2))) +svbfloat16x2_t svreinterpret_bf16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x2))) +svbfloat16x2_t svreinterpret_bf16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x2))) +svbfloat16x2_t svreinterpret_bf16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x2))) +svbfloat16x2_t svreinterpret_bf16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x2))) +svbfloat16x2_t svreinterpret_bf16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x2))) +svbfloat16x2_t svreinterpret_bf16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x2))) +svbfloat16x2_t svreinterpret_bf16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x2))) +svbfloat16x2_t svreinterpret_bf16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x2))) +svbfloat16x2_t svreinterpret_bf16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x2))) +svbfloat16x2_t svreinterpret_bf16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x2))) +svbfloat16x2_t svreinterpret_bf16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x2))) +svbfloat16x2_t svreinterpret_bf16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x2))) +svfloat32x2_t svreinterpret_f32_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x2))) +svfloat32x2_t svreinterpret_f32_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x2))) +svfloat32x2_t svreinterpret_f32_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x2))) +svfloat32x2_t svreinterpret_f32_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x2))) +svfloat32x2_t svreinterpret_f32_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x2))) +svfloat32x2_t svreinterpret_f32_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x2))) +svfloat32x2_t svreinterpret_f32_s64_x2(svint64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x2))) +svfloat32x2_t svreinterpret_f32_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x2))) +svfloat32x2_t svreinterpret_f32_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x2))) +svfloat32x2_t svreinterpret_f32_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x2))) +svfloat32x2_t svreinterpret_f32_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x2))) +svfloat32x2_t svreinterpret_f32_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x2))) +svfloat64x2_t svreinterpret_f64_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x2))) +svfloat64x2_t svreinterpret_f64_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x2))) +svfloat64x2_t svreinterpret_f64_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x2))) +svfloat64x2_t svreinterpret_f64_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x2))) +svfloat64x2_t svreinterpret_f64_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x2))) +svfloat64x2_t svreinterpret_f64_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x2))) +svfloat64x2_t svreinterpret_f64_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x2))) +svfloat64x2_t svreinterpret_f64_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x2))) +svfloat64x2_t svreinterpret_f64_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x2))) +svfloat64x2_t svreinterpret_f64_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x2))) +svfloat64x2_t svreinterpret_f64_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x2))) +svfloat64x2_t svreinterpret_f64_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x2))) +svint8x2_t svreinterpret_s8(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x2))) +svint8x2_t svreinterpret_s8(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x2))) +svint8x2_t svreinterpret_s8(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x2))) +svint8x2_t svreinterpret_s8(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x2))) +svint8x2_t svreinterpret_s8(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x2))) +svint8x2_t svreinterpret_s8(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x2))) +svint8x2_t svreinterpret_s8(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x2))) +svint8x2_t svreinterpret_s8(svuint64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x2))) +svint8x2_t svreinterpret_s8(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x2))) +svint8x2_t svreinterpret_s8(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x2))) +svint8x2_t svreinterpret_s8(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x2))) +svint8x2_t svreinterpret_s8(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x2))) +svuint8x2_t svreinterpret_u8(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x2))) +svuint8x2_t svreinterpret_u8(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x2))) +svuint8x2_t svreinterpret_u8(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x2))) +svuint8x2_t svreinterpret_u8(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x2))) +svuint8x2_t svreinterpret_u8(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x2))) +svuint8x2_t svreinterpret_u8(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x2))) +svuint8x2_t svreinterpret_u8(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x2))) +svuint8x2_t svreinterpret_u8(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x2))) +svuint8x2_t svreinterpret_u8(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x2))) +svuint8x2_t svreinterpret_u8(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x2))) +svuint8x2_t svreinterpret_u8(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x2))) +svuint8x2_t svreinterpret_u8(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x2))) +svint16x2_t svreinterpret_s16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x2))) +svint16x2_t svreinterpret_s16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x2))) +svint16x2_t svreinterpret_s16(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x2))) +svint16x2_t svreinterpret_s16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x2))) +svint16x2_t svreinterpret_s16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x2))) +svint16x2_t svreinterpret_s16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x2))) +svint16x2_t svreinterpret_s16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x2))) +svint16x2_t svreinterpret_s16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x2))) +svint16x2_t svreinterpret_s16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x2))) +svint16x2_t svreinterpret_s16(svbfloat16x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x2))) +svint16x2_t svreinterpret_s16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x2))) +svint16x2_t svreinterpret_s16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x2))) +svuint16x2_t svreinterpret_u16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x2))) +svuint16x2_t svreinterpret_u16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x2))) +svuint16x2_t svreinterpret_u16(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x2))) +svuint16x2_t svreinterpret_u16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x2))) +svuint16x2_t svreinterpret_u16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x2))) +svuint16x2_t svreinterpret_u16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x2))) +svuint16x2_t svreinterpret_u16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x2))) +svuint16x2_t svreinterpret_u16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x2))) +svuint16x2_t svreinterpret_u16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x2))) +svuint16x2_t svreinterpret_u16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x2))) +svuint16x2_t svreinterpret_u16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x2))) +svuint16x2_t svreinterpret_u16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x2))) +svint32x2_t svreinterpret_s32(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x2))) +svint32x2_t svreinterpret_s32(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x2))) +svint32x2_t svreinterpret_s32(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x2))) +svint32x2_t svreinterpret_s32(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x2))) +svint32x2_t svreinterpret_s32(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x2))) +svint32x2_t svreinterpret_s32(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x2))) +svint32x2_t svreinterpret_s32(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x2))) +svint32x2_t svreinterpret_s32(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x2))) +svint32x2_t svreinterpret_s32(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x2))) +svint32x2_t svreinterpret_s32(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x2))) +svint32x2_t svreinterpret_s32(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x2))) +svint32x2_t 
svreinterpret_s32(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x2))) +svuint32x2_t svreinterpret_u32(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x2))) +svuint32x2_t svreinterpret_u32(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x2))) +svuint32x2_t svreinterpret_u32(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x2))) +svuint32x2_t svreinterpret_u32(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x2))) +svuint32x2_t svreinterpret_u32(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x2))) +svuint32x2_t svreinterpret_u32(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x2))) +svuint32x2_t svreinterpret_u32(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x2))) +svuint32x2_t svreinterpret_u32(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x2))) +svuint32x2_t svreinterpret_u32(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x2))) +svuint32x2_t svreinterpret_u32(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x2))) +svuint32x2_t svreinterpret_u32(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x2))) +svuint32x2_t svreinterpret_u32(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x2))) +svint64x2_t svreinterpret_s64(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x2))) +svint64x2_t svreinterpret_s64(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x2))) +svint64x2_t svreinterpret_s64(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x2))) +svint64x2_t svreinterpret_s64(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x2))) +svint64x2_t svreinterpret_s64(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x2))) +svint64x2_t svreinterpret_s64(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x2))) +svint64x2_t svreinterpret_s64(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x2))) +svint64x2_t svreinterpret_s64(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x2))) +svint64x2_t svreinterpret_s64(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x2))) +svint64x2_t svreinterpret_s64(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x2))) +svint64x2_t svreinterpret_s64(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x2))) +svint64x2_t svreinterpret_s64(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x2))) +svuint64x2_t svreinterpret_u64(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x2))) 
+svuint64x2_t svreinterpret_u64(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x2))) +svuint64x2_t svreinterpret_u64(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x2))) +svuint64x2_t svreinterpret_u64(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x2))) +svuint64x2_t svreinterpret_u64(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x2))) +svuint64x2_t svreinterpret_u64(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x2))) +svuint64x2_t svreinterpret_u64(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x2))) +svuint64x2_t svreinterpret_u64(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x2))) +svuint64x2_t svreinterpret_u64(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x2))) +svuint64x2_t svreinterpret_u64(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x2))) +svuint64x2_t svreinterpret_u64(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x2))) +svuint64x2_t svreinterpret_u64(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x2))) +svfloat16x2_t svreinterpret_f16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x2))) +svfloat16x2_t svreinterpret_f16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x2))) +svfloat16x2_t svreinterpret_f16(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x2))) +svfloat16x2_t svreinterpret_f16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x2))) +svfloat16x2_t svreinterpret_f16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x2))) +svfloat16x2_t svreinterpret_f16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x2))) +svfloat16x2_t svreinterpret_f16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x2))) +svfloat16x2_t svreinterpret_f16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x2))) +svfloat16x2_t svreinterpret_f16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x2))) +svfloat16x2_t svreinterpret_f16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x2))) +svfloat16x2_t svreinterpret_f16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x2))) +svfloat16x2_t svreinterpret_f16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x2))) +svbfloat16x2_t svreinterpret_bf16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x2))) +svbfloat16x2_t svreinterpret_bf16(svint16x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x2))) +svbfloat16x2_t svreinterpret_bf16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x2))) +svbfloat16x2_t svreinterpret_bf16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x2))) +svbfloat16x2_t svreinterpret_bf16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x2))) +svbfloat16x2_t svreinterpret_bf16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x2))) +svbfloat16x2_t svreinterpret_bf16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x2))) +svbfloat16x2_t svreinterpret_bf16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x2))) +svfloat32x2_t svreinterpret_f32(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x2))) +svfloat32x2_t svreinterpret_f32(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x2))) +svfloat32x2_t svreinterpret_f32(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x2))) +svfloat32x2_t svreinterpret_f32(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x2))) +svfloat32x2_t svreinterpret_f32(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x2))) +svfloat32x2_t svreinterpret_f32(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x2))) +svfloat32x2_t svreinterpret_f32(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x2))) +svfloat32x2_t svreinterpret_f32(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x2))) +svfloat32x2_t svreinterpret_f32(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x2))) +svfloat32x2_t svreinterpret_f32(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x2))) +svfloat32x2_t svreinterpret_f32(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x2))) +svfloat32x2_t svreinterpret_f32(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x2))) +svfloat64x2_t svreinterpret_f64(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x2))) +svfloat64x2_t svreinterpret_f64(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x2))) +svfloat64x2_t svreinterpret_f64(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x2))) +svfloat64x2_t svreinterpret_f64(svuint16x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x2))) +svfloat64x2_t svreinterpret_f64(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x2))) +svfloat64x2_t svreinterpret_f64(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x2))) +svfloat64x2_t svreinterpret_f64(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x2))) +svfloat64x2_t svreinterpret_f64(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x2))) +svfloat64x2_t svreinterpret_f64(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x2))) +svfloat64x2_t svreinterpret_f64(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x2))) +svfloat64x2_t svreinterpret_f64(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x2))) +svfloat64x2_t svreinterpret_f64(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x3))) +svint8x3_t svreinterpret_s8_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x3))) +svint8x3_t svreinterpret_s8_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x3))) +svint8x3_t svreinterpret_s8_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x3))) +svint8x3_t svreinterpret_s8_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x3))) +svint8x3_t svreinterpret_s8_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x3))) +svint8x3_t svreinterpret_s8_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x3))) +svint8x3_t svreinterpret_s8_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x3))) +svint8x3_t svreinterpret_s8_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x3))) +svint8x3_t svreinterpret_s8_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x3))) +svint8x3_t svreinterpret_s8_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x3))) +svint8x3_t svreinterpret_s8_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x3))) +svint8x3_t svreinterpret_s8_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x3))) +svuint8x3_t svreinterpret_u8_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x3))) +svuint8x3_t svreinterpret_u8_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x3))) +svuint8x3_t svreinterpret_u8_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x3))) +svuint8x3_t svreinterpret_u8_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x3))) +svuint8x3_t svreinterpret_u8_s32_x3(svint32x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x3))) +svuint8x3_t svreinterpret_u8_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x3))) +svuint8x3_t svreinterpret_u8_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x3))) +svuint8x3_t svreinterpret_u8_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x3))) +svuint8x3_t svreinterpret_u8_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x3))) +svuint8x3_t svreinterpret_u8_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x3))) +svuint8x3_t svreinterpret_u8_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x3))) +svuint8x3_t svreinterpret_u8_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x3))) +svint16x3_t svreinterpret_s16_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x3))) +svint16x3_t svreinterpret_s16_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x3))) +svint16x3_t svreinterpret_s16_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x3))) +svint16x3_t svreinterpret_s16_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x3))) +svint16x3_t svreinterpret_s16_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x3))) +svint16x3_t svreinterpret_s16_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x3))) +svint16x3_t svreinterpret_s16_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x3))) +svint16x3_t svreinterpret_s16_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x3))) +svint16x3_t svreinterpret_s16_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x3))) +svint16x3_t svreinterpret_s16_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x3))) +svint16x3_t svreinterpret_s16_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x3))) +svint16x3_t svreinterpret_s16_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x3))) +svuint16x3_t svreinterpret_u16_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x3))) +svuint16x3_t svreinterpret_u16_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x3))) +svuint16x3_t svreinterpret_u16_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x3))) +svuint16x3_t svreinterpret_u16_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x3))) +svuint16x3_t svreinterpret_u16_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x3))) +svuint16x3_t 
svreinterpret_u16_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x3))) +svuint16x3_t svreinterpret_u16_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x3))) +svuint16x3_t svreinterpret_u16_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x3))) +svuint16x3_t svreinterpret_u16_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x3))) +svuint16x3_t svreinterpret_u16_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x3))) +svuint16x3_t svreinterpret_u16_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x3))) +svuint16x3_t svreinterpret_u16_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x3))) +svint32x3_t svreinterpret_s32_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x3))) +svint32x3_t svreinterpret_s32_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x3))) +svint32x3_t svreinterpret_s32_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x3))) +svint32x3_t svreinterpret_s32_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x3))) +svint32x3_t svreinterpret_s32_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x3))) +svint32x3_t svreinterpret_s32_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x3))) +svint32x3_t svreinterpret_s32_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x3))) +svint32x3_t svreinterpret_s32_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x3))) +svint32x3_t svreinterpret_s32_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x3))) +svint32x3_t svreinterpret_s32_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x3))) +svint32x3_t svreinterpret_s32_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x3))) +svint32x3_t svreinterpret_s32_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x3))) +svuint32x3_t svreinterpret_u32_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x3))) +svuint32x3_t svreinterpret_u32_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x3))) +svuint32x3_t svreinterpret_u32_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x3))) +svuint32x3_t svreinterpret_u32_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x3))) +svuint32x3_t svreinterpret_u32_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x3))) +svuint32x3_t svreinterpret_u32_u32_x3(svuint32x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x3))) +svuint32x3_t svreinterpret_u32_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x3))) +svuint32x3_t svreinterpret_u32_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x3))) +svuint32x3_t svreinterpret_u32_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x3))) +svuint32x3_t svreinterpret_u32_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x3))) +svuint32x3_t svreinterpret_u32_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x3))) +svuint32x3_t svreinterpret_u32_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x3))) +svint64x3_t svreinterpret_s64_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x3))) +svint64x3_t svreinterpret_s64_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x3))) +svint64x3_t svreinterpret_s64_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x3))) +svint64x3_t svreinterpret_s64_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x3))) +svint64x3_t svreinterpret_s64_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x3))) +svint64x3_t svreinterpret_s64_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x3))) +svint64x3_t svreinterpret_s64_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x3))) +svint64x3_t svreinterpret_s64_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x3))) +svint64x3_t svreinterpret_s64_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x3))) +svint64x3_t svreinterpret_s64_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x3))) +svint64x3_t svreinterpret_s64_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x3))) +svint64x3_t svreinterpret_s64_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x3))) +svuint64x3_t svreinterpret_u64_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x3))) +svuint64x3_t svreinterpret_u64_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x3))) +svuint64x3_t svreinterpret_u64_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x3))) +svuint64x3_t svreinterpret_u64_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x3))) +svuint64x3_t svreinterpret_u64_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x3))) +svuint64x3_t svreinterpret_u64_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x3))) 
+svuint64x3_t svreinterpret_u64_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x3))) +svuint64x3_t svreinterpret_u64_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x3))) +svuint64x3_t svreinterpret_u64_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x3))) +svuint64x3_t svreinterpret_u64_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x3))) +svuint64x3_t svreinterpret_u64_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x3))) +svuint64x3_t svreinterpret_u64_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x3))) +svfloat16x3_t svreinterpret_f16_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x3))) +svfloat16x3_t svreinterpret_f16_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x3))) +svfloat16x3_t svreinterpret_f16_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x3))) +svfloat16x3_t svreinterpret_f16_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x3))) +svfloat16x3_t svreinterpret_f16_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x3))) +svfloat16x3_t svreinterpret_f16_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x3))) +svfloat16x3_t svreinterpret_f16_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x3))) +svfloat16x3_t svreinterpret_f16_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x3))) +svfloat16x3_t svreinterpret_f16_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x3))) +svfloat16x3_t svreinterpret_f16_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x3))) +svfloat16x3_t svreinterpret_f16_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x3))) +svfloat16x3_t svreinterpret_f16_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x3))) +svbfloat16x3_t svreinterpret_bf16_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x3))) +svbfloat16x3_t svreinterpret_bf16_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x3))) +svbfloat16x3_t svreinterpret_bf16_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x3))) +svbfloat16x3_t svreinterpret_bf16_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x3))) +svbfloat16x3_t svreinterpret_bf16_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x3))) +svbfloat16x3_t svreinterpret_bf16_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x3))) +svbfloat16x3_t 
svreinterpret_bf16_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x3))) +svbfloat16x3_t svreinterpret_bf16_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x3))) +svbfloat16x3_t svreinterpret_bf16_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x3))) +svbfloat16x3_t svreinterpret_bf16_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x3))) +svbfloat16x3_t svreinterpret_bf16_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x3))) +svbfloat16x3_t svreinterpret_bf16_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x3))) +svfloat32x3_t svreinterpret_f32_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x3))) +svfloat32x3_t svreinterpret_f32_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x3))) +svfloat32x3_t svreinterpret_f32_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x3))) +svfloat32x3_t svreinterpret_f32_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x3))) +svfloat32x3_t svreinterpret_f32_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x3))) +svfloat32x3_t svreinterpret_f32_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x3))) +svfloat32x3_t svreinterpret_f32_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x3))) +svfloat32x3_t svreinterpret_f32_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x3))) +svfloat32x3_t svreinterpret_f32_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x3))) +svfloat32x3_t svreinterpret_f32_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x3))) +svfloat32x3_t svreinterpret_f32_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x3))) +svfloat32x3_t svreinterpret_f32_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x3))) +svfloat64x3_t svreinterpret_f64_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x3))) +svfloat64x3_t svreinterpret_f64_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x3))) +svfloat64x3_t svreinterpret_f64_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x3))) +svfloat64x3_t svreinterpret_f64_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x3))) +svfloat64x3_t svreinterpret_f64_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x3))) +svfloat64x3_t svreinterpret_f64_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x3))) +svfloat64x3_t svreinterpret_f64_s64_x3(svint64x3_t 
op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x3))) +svfloat64x3_t svreinterpret_f64_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x3))) +svfloat64x3_t svreinterpret_f64_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x3))) +svfloat64x3_t svreinterpret_f64_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x3))) +svfloat64x3_t svreinterpret_f64_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x3))) +svfloat64x3_t svreinterpret_f64_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x3))) +svint8x3_t svreinterpret_s8(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x3))) +svint8x3_t svreinterpret_s8(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x3))) +svint8x3_t svreinterpret_s8(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x3))) +svint8x3_t svreinterpret_s8(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x3))) +svint8x3_t svreinterpret_s8(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x3))) +svint8x3_t svreinterpret_s8(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x3))) +svint8x3_t svreinterpret_s8(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x3))) +svint8x3_t svreinterpret_s8(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x3))) +svint8x3_t svreinterpret_s8(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x3))) +svint8x3_t svreinterpret_s8(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x3))) +svint8x3_t svreinterpret_s8(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x3))) +svint8x3_t svreinterpret_s8(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x3))) +svuint8x3_t svreinterpret_u8(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x3))) +svuint8x3_t svreinterpret_u8(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x3))) +svuint8x3_t svreinterpret_u8(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x3))) +svuint8x3_t svreinterpret_u8(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x3))) +svuint8x3_t svreinterpret_u8(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x3))) +svuint8x3_t svreinterpret_u8(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x3))) +svuint8x3_t svreinterpret_u8(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x3))) +svuint8x3_t svreinterpret_u8(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x3))) +svuint8x3_t svreinterpret_u8(svfloat16x3_t 
op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x3))) +svuint8x3_t svreinterpret_u8(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x3))) +svuint8x3_t svreinterpret_u8(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x3))) +svuint8x3_t svreinterpret_u8(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x3))) +svint16x3_t svreinterpret_s16(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x3))) +svint16x3_t svreinterpret_s16(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x3))) +svint16x3_t svreinterpret_s16(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x3))) +svint16x3_t svreinterpret_s16(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x3))) +svint16x3_t svreinterpret_s16(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x3))) +svint16x3_t svreinterpret_s16(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x3))) +svint16x3_t svreinterpret_s16(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x3))) +svint16x3_t svreinterpret_s16(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x3))) +svint16x3_t svreinterpret_s16(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x3))) +svint16x3_t svreinterpret_s16(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x3))) +svint16x3_t svreinterpret_s16(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x3))) +svint16x3_t svreinterpret_s16(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x3))) +svuint16x3_t svreinterpret_u16(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x3))) +svuint16x3_t svreinterpret_u16(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x3))) +svuint16x3_t svreinterpret_u16(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x3))) +svuint16x3_t svreinterpret_u16(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x3))) +svuint16x3_t svreinterpret_u16(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x3))) +svuint16x3_t svreinterpret_u16(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x3))) +svuint16x3_t svreinterpret_u16(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x3))) +svuint16x3_t svreinterpret_u16(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x3))) +svuint16x3_t svreinterpret_u16(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x3))) +svuint16x3_t svreinterpret_u16(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x3))) +svuint16x3_t 
svreinterpret_u16(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x3))) +svuint16x3_t svreinterpret_u16(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x3))) +svint32x3_t svreinterpret_s32(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x3))) +svint32x3_t svreinterpret_s32(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x3))) +svint32x3_t svreinterpret_s32(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x3))) +svint32x3_t svreinterpret_s32(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x3))) +svint32x3_t svreinterpret_s32(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x3))) +svint32x3_t svreinterpret_s32(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x3))) +svint32x3_t svreinterpret_s32(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x3))) +svint32x3_t svreinterpret_s32(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x3))) +svint32x3_t svreinterpret_s32(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x3))) +svint32x3_t svreinterpret_s32(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x3))) +svint32x3_t svreinterpret_s32(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x3))) +svint32x3_t svreinterpret_s32(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x3))) +svuint32x3_t svreinterpret_u32(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x3))) +svuint32x3_t svreinterpret_u32(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x3))) +svuint32x3_t svreinterpret_u32(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x3))) +svuint32x3_t svreinterpret_u32(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x3))) +svuint32x3_t svreinterpret_u32(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x3))) +svuint32x3_t svreinterpret_u32(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x3))) +svuint32x3_t svreinterpret_u32(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x3))) +svuint32x3_t svreinterpret_u32(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x3))) +svuint32x3_t svreinterpret_u32(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x3))) +svuint32x3_t svreinterpret_u32(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x3))) +svuint32x3_t svreinterpret_u32(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x3))) +svuint32x3_t svreinterpret_u32(svfloat64x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x3))) +svint64x3_t svreinterpret_s64(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x3))) +svint64x3_t svreinterpret_s64(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x3))) +svint64x3_t svreinterpret_s64(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x3))) +svint64x3_t svreinterpret_s64(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x3))) +svint64x3_t svreinterpret_s64(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x3))) +svint64x3_t svreinterpret_s64(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x3))) +svint64x3_t svreinterpret_s64(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x3))) +svint64x3_t svreinterpret_s64(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x3))) +svint64x3_t svreinterpret_s64(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x3))) +svint64x3_t svreinterpret_s64(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x3))) +svint64x3_t svreinterpret_s64(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x3))) +svint64x3_t svreinterpret_s64(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x3))) +svuint64x3_t svreinterpret_u64(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x3))) +svuint64x3_t svreinterpret_u64(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x3))) +svuint64x3_t svreinterpret_u64(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x3))) +svuint64x3_t svreinterpret_u64(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x3))) +svuint64x3_t svreinterpret_u64(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x3))) +svuint64x3_t svreinterpret_u64(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x3))) +svuint64x3_t svreinterpret_u64(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x3))) +svuint64x3_t svreinterpret_u64(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x3))) +svuint64x3_t svreinterpret_u64(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x3))) +svuint64x3_t svreinterpret_u64(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x3))) +svuint64x3_t svreinterpret_u64(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x3))) +svuint64x3_t svreinterpret_u64(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x3))) +svfloat16x3_t svreinterpret_f16(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x3))) +svfloat16x3_t 
svreinterpret_f16(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x3))) +svfloat16x3_t svreinterpret_f16(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x3))) +svfloat16x3_t svreinterpret_f16(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x3))) +svfloat16x3_t svreinterpret_f16(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x3))) +svfloat16x3_t svreinterpret_f16(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x3))) +svfloat16x3_t svreinterpret_f16(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x3))) +svfloat16x3_t svreinterpret_f16(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x3))) +svfloat16x3_t svreinterpret_f16(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x3))) +svfloat16x3_t svreinterpret_f16(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x3))) +svfloat16x3_t svreinterpret_f16(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x3))) +svfloat16x3_t svreinterpret_f16(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x3))) +svbfloat16x3_t svreinterpret_bf16(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x3))) +svbfloat16x3_t svreinterpret_bf16(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x3))) +svbfloat16x3_t svreinterpret_bf16(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x3))) +svbfloat16x3_t svreinterpret_bf16(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x3))) +svbfloat16x3_t svreinterpret_bf16(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x3))) +svbfloat16x3_t svreinterpret_bf16(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x3))) +svbfloat16x3_t svreinterpret_bf16(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x3))) +svbfloat16x3_t svreinterpret_bf16(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x3))) +svbfloat16x3_t svreinterpret_bf16(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x3))) +svbfloat16x3_t svreinterpret_bf16(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x3))) +svbfloat16x3_t svreinterpret_bf16(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x3))) +svbfloat16x3_t svreinterpret_bf16(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x3))) +svfloat32x3_t svreinterpret_f32(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x3))) +svfloat32x3_t svreinterpret_f32(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x3))) +svfloat32x3_t svreinterpret_f32(svint16x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x3))) +svfloat32x3_t svreinterpret_f32(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x3))) +svfloat32x3_t svreinterpret_f32(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x3))) +svfloat32x3_t svreinterpret_f32(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x3))) +svfloat32x3_t svreinterpret_f32(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x3))) +svfloat32x3_t svreinterpret_f32(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x3))) +svfloat32x3_t svreinterpret_f32(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x3))) +svfloat32x3_t svreinterpret_f32(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x3))) +svfloat32x3_t svreinterpret_f32(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x3))) +svfloat32x3_t svreinterpret_f32(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x3))) +svfloat64x3_t svreinterpret_f64(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x3))) +svfloat64x3_t svreinterpret_f64(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x3))) +svfloat64x3_t svreinterpret_f64(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x3))) +svfloat64x3_t svreinterpret_f64(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x3))) +svfloat64x3_t svreinterpret_f64(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x3))) +svfloat64x3_t svreinterpret_f64(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x3))) +svfloat64x3_t svreinterpret_f64(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x3))) +svfloat64x3_t svreinterpret_f64(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x3))) +svfloat64x3_t svreinterpret_f64(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x3))) +svfloat64x3_t svreinterpret_f64(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x3))) +svfloat64x3_t svreinterpret_f64(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x3))) +svfloat64x3_t svreinterpret_f64(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x4))) +svint8x4_t svreinterpret_s8_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x4))) +svint8x4_t svreinterpret_s8_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x4))) +svint8x4_t svreinterpret_s8_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x4))) +svint8x4_t svreinterpret_s8_u16_x4(svuint16x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x4))) +svint8x4_t svreinterpret_s8_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x4))) +svint8x4_t svreinterpret_s8_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x4))) +svint8x4_t svreinterpret_s8_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x4))) +svint8x4_t svreinterpret_s8_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x4))) +svint8x4_t svreinterpret_s8_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x4))) +svint8x4_t svreinterpret_s8_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x4))) +svint8x4_t svreinterpret_s8_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x4))) +svint8x4_t svreinterpret_s8_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x4))) +svuint8x4_t svreinterpret_u8_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x4))) +svuint8x4_t svreinterpret_u8_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x4))) +svuint8x4_t svreinterpret_u8_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x4))) +svuint8x4_t svreinterpret_u8_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x4))) +svuint8x4_t svreinterpret_u8_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x4))) +svuint8x4_t svreinterpret_u8_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x4))) +svuint8x4_t svreinterpret_u8_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x4))) +svuint8x4_t svreinterpret_u8_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x4))) +svuint8x4_t svreinterpret_u8_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x4))) +svuint8x4_t svreinterpret_u8_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x4))) +svuint8x4_t svreinterpret_u8_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x4))) +svuint8x4_t svreinterpret_u8_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x4))) +svint16x4_t svreinterpret_s16_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x4))) +svint16x4_t svreinterpret_s16_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x4))) +svint16x4_t svreinterpret_s16_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x4))) +svint16x4_t svreinterpret_s16_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x4))) +svint16x4_t svreinterpret_s16_s32_x4(svint32x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x4))) +svint16x4_t svreinterpret_s16_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x4))) +svint16x4_t svreinterpret_s16_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x4))) +svint16x4_t svreinterpret_s16_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x4))) +svint16x4_t svreinterpret_s16_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x4))) +svint16x4_t svreinterpret_s16_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x4))) +svint16x4_t svreinterpret_s16_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x4))) +svint16x4_t svreinterpret_s16_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x4))) +svuint16x4_t svreinterpret_u16_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x4))) +svuint16x4_t svreinterpret_u16_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x4))) +svuint16x4_t svreinterpret_u16_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x4))) +svuint16x4_t svreinterpret_u16_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x4))) +svuint16x4_t svreinterpret_u16_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x4))) +svuint16x4_t svreinterpret_u16_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x4))) +svuint16x4_t svreinterpret_u16_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x4))) +svuint16x4_t svreinterpret_u16_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x4))) +svuint16x4_t svreinterpret_u16_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x4))) +svuint16x4_t svreinterpret_u16_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x4))) +svuint16x4_t svreinterpret_u16_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x4))) +svuint16x4_t svreinterpret_u16_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x4))) +svint32x4_t svreinterpret_s32_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x4))) +svint32x4_t svreinterpret_s32_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x4))) +svint32x4_t svreinterpret_s32_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x4))) +svint32x4_t svreinterpret_s32_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x4))) +svint32x4_t svreinterpret_s32_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x4))) 
+svint32x4_t svreinterpret_s32_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x4))) +svint32x4_t svreinterpret_s32_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x4))) +svint32x4_t svreinterpret_s32_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x4))) +svint32x4_t svreinterpret_s32_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x4))) +svint32x4_t svreinterpret_s32_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x4))) +svint32x4_t svreinterpret_s32_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x4))) +svint32x4_t svreinterpret_s32_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x4))) +svuint32x4_t svreinterpret_u32_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x4))) +svuint32x4_t svreinterpret_u32_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x4))) +svuint32x4_t svreinterpret_u32_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x4))) +svuint32x4_t svreinterpret_u32_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x4))) +svuint32x4_t svreinterpret_u32_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x4))) +svuint32x4_t svreinterpret_u32_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x4))) +svuint32x4_t svreinterpret_u32_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x4))) +svuint32x4_t svreinterpret_u32_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x4))) +svuint32x4_t svreinterpret_u32_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x4))) +svuint32x4_t svreinterpret_u32_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x4))) +svuint32x4_t svreinterpret_u32_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x4))) +svuint32x4_t svreinterpret_u32_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x4))) +svint64x4_t svreinterpret_s64_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x4))) +svint64x4_t svreinterpret_s64_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x4))) +svint64x4_t svreinterpret_s64_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x4))) +svint64x4_t svreinterpret_s64_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x4))) +svint64x4_t svreinterpret_s64_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x4))) +svint64x4_t svreinterpret_s64_u32_x4(svuint32x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x4))) +svint64x4_t svreinterpret_s64_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x4))) +svint64x4_t svreinterpret_s64_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x4))) +svint64x4_t svreinterpret_s64_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x4))) +svint64x4_t svreinterpret_s64_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x4))) +svint64x4_t svreinterpret_s64_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x4))) +svint64x4_t svreinterpret_s64_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x4))) +svuint64x4_t svreinterpret_u64_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x4))) +svuint64x4_t svreinterpret_u64_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x4))) +svuint64x4_t svreinterpret_u64_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x4))) +svuint64x4_t svreinterpret_u64_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x4))) +svuint64x4_t svreinterpret_u64_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x4))) +svuint64x4_t svreinterpret_u64_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x4))) +svuint64x4_t svreinterpret_u64_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x4))) +svuint64x4_t svreinterpret_u64_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x4))) +svuint64x4_t svreinterpret_u64_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x4))) +svuint64x4_t svreinterpret_u64_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x4))) +svuint64x4_t svreinterpret_u64_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x4))) +svuint64x4_t svreinterpret_u64_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x4))) +svfloat16x4_t svreinterpret_f16_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x4))) +svfloat16x4_t svreinterpret_f16_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x4))) +svfloat16x4_t svreinterpret_f16_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x4))) +svfloat16x4_t svreinterpret_f16_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x4))) +svfloat16x4_t svreinterpret_f16_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x4))) +svfloat16x4_t svreinterpret_f16_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x4))) 
+svfloat16x4_t svreinterpret_f16_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x4))) +svfloat16x4_t svreinterpret_f16_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x4))) +svfloat16x4_t svreinterpret_f16_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x4))) +svfloat16x4_t svreinterpret_f16_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x4))) +svfloat16x4_t svreinterpret_f16_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x4))) +svfloat16x4_t svreinterpret_f16_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x4))) +svbfloat16x4_t svreinterpret_bf16_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x4))) +svbfloat16x4_t svreinterpret_bf16_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x4))) +svbfloat16x4_t svreinterpret_bf16_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x4))) +svbfloat16x4_t svreinterpret_bf16_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x4))) +svbfloat16x4_t svreinterpret_bf16_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x4))) +svbfloat16x4_t svreinterpret_bf16_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x4))) +svbfloat16x4_t svreinterpret_bf16_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x4))) +svbfloat16x4_t svreinterpret_bf16_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x4))) +svbfloat16x4_t svreinterpret_bf16_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x4))) +svbfloat16x4_t svreinterpret_bf16_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x4))) +svbfloat16x4_t svreinterpret_bf16_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x4))) +svbfloat16x4_t svreinterpret_bf16_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x4))) +svfloat32x4_t svreinterpret_f32_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x4))) +svfloat32x4_t svreinterpret_f32_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x4))) +svfloat32x4_t svreinterpret_f32_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x4))) +svfloat32x4_t svreinterpret_f32_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x4))) +svfloat32x4_t svreinterpret_f32_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x4))) +svfloat32x4_t svreinterpret_f32_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x4))) +svfloat32x4_t 
svreinterpret_f32_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x4))) +svfloat32x4_t svreinterpret_f32_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x4))) +svfloat32x4_t svreinterpret_f32_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x4))) +svfloat32x4_t svreinterpret_f32_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x4))) +svfloat32x4_t svreinterpret_f32_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x4))) +svfloat32x4_t svreinterpret_f32_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x4))) +svfloat64x4_t svreinterpret_f64_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x4))) +svfloat64x4_t svreinterpret_f64_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x4))) +svfloat64x4_t svreinterpret_f64_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x4))) +svfloat64x4_t svreinterpret_f64_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x4))) +svfloat64x4_t svreinterpret_f64_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x4))) +svfloat64x4_t svreinterpret_f64_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x4))) +svfloat64x4_t svreinterpret_f64_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x4))) +svfloat64x4_t svreinterpret_f64_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x4))) +svfloat64x4_t svreinterpret_f64_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x4))) +svfloat64x4_t svreinterpret_f64_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x4))) +svfloat64x4_t svreinterpret_f64_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x4))) +svfloat64x4_t svreinterpret_f64_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x4))) +svint8x4_t svreinterpret_s8(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x4))) +svint8x4_t svreinterpret_s8(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x4))) +svint8x4_t svreinterpret_s8(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x4))) +svint8x4_t svreinterpret_s8(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x4))) +svint8x4_t svreinterpret_s8(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x4))) +svint8x4_t svreinterpret_s8(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x4))) +svint8x4_t svreinterpret_s8(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x4))) 
+svint8x4_t svreinterpret_s8(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x4))) +svint8x4_t svreinterpret_s8(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x4))) +svint8x4_t svreinterpret_s8(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x4))) +svint8x4_t svreinterpret_s8(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x4))) +svint8x4_t svreinterpret_s8(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x4))) +svuint8x4_t svreinterpret_u8(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x4))) +svuint8x4_t svreinterpret_u8(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x4))) +svuint8x4_t svreinterpret_u8(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x4))) +svuint8x4_t svreinterpret_u8(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x4))) +svuint8x4_t svreinterpret_u8(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x4))) +svuint8x4_t svreinterpret_u8(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x4))) +svuint8x4_t svreinterpret_u8(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x4))) +svuint8x4_t svreinterpret_u8(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x4))) +svuint8x4_t svreinterpret_u8(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x4))) +svuint8x4_t svreinterpret_u8(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x4))) +svuint8x4_t svreinterpret_u8(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x4))) +svuint8x4_t svreinterpret_u8(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x4))) +svint16x4_t svreinterpret_s16(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x4))) +svint16x4_t svreinterpret_s16(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x4))) +svint16x4_t svreinterpret_s16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x4))) +svint16x4_t svreinterpret_s16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x4))) +svint16x4_t svreinterpret_s16(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x4))) +svint16x4_t svreinterpret_s16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x4))) +svint16x4_t svreinterpret_s16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x4))) +svint16x4_t svreinterpret_s16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x4))) +svint16x4_t svreinterpret_s16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x4))) +svint16x4_t 
svreinterpret_s16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x4))) +svint16x4_t svreinterpret_s16(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x4))) +svint16x4_t svreinterpret_s16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x4))) +svuint16x4_t svreinterpret_u16(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x4))) +svuint16x4_t svreinterpret_u16(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x4))) +svuint16x4_t svreinterpret_u16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x4))) +svuint16x4_t svreinterpret_u16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x4))) +svuint16x4_t svreinterpret_u16(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x4))) +svuint16x4_t svreinterpret_u16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x4))) +svuint16x4_t svreinterpret_u16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x4))) +svuint16x4_t svreinterpret_u16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x4))) +svuint16x4_t svreinterpret_u16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x4))) +svuint16x4_t svreinterpret_u16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x4))) +svuint16x4_t svreinterpret_u16(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x4))) +svuint16x4_t svreinterpret_u16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x4))) +svint32x4_t svreinterpret_s32(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x4))) +svint32x4_t svreinterpret_s32(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x4))) +svint32x4_t svreinterpret_s32(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x4))) +svint32x4_t svreinterpret_s32(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x4))) +svint32x4_t svreinterpret_s32(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x4))) +svint32x4_t svreinterpret_s32(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x4))) +svint32x4_t svreinterpret_s32(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x4))) +svint32x4_t svreinterpret_s32(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x4))) +svint32x4_t svreinterpret_s32(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x4))) +svint32x4_t svreinterpret_s32(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x4))) +svint32x4_t svreinterpret_s32(svfloat32x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x4))) +svint32x4_t svreinterpret_s32(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x4))) +svuint32x4_t svreinterpret_u32(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x4))) +svuint32x4_t svreinterpret_u32(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x4))) +svuint32x4_t svreinterpret_u32(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x4))) +svuint32x4_t svreinterpret_u32(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x4))) +svuint32x4_t svreinterpret_u32(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x4))) +svuint32x4_t svreinterpret_u32(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x4))) +svuint32x4_t svreinterpret_u32(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x4))) +svuint32x4_t svreinterpret_u32(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x4))) +svuint32x4_t svreinterpret_u32(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x4))) +svuint32x4_t svreinterpret_u32(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x4))) +svuint32x4_t svreinterpret_u32(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x4))) +svuint32x4_t svreinterpret_u32(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x4))) +svint64x4_t svreinterpret_s64(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x4))) +svint64x4_t svreinterpret_s64(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x4))) +svint64x4_t svreinterpret_s64(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x4))) +svint64x4_t svreinterpret_s64(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x4))) +svint64x4_t svreinterpret_s64(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x4))) +svint64x4_t svreinterpret_s64(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x4))) +svint64x4_t svreinterpret_s64(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x4))) +svint64x4_t svreinterpret_s64(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x4))) +svint64x4_t svreinterpret_s64(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x4))) +svint64x4_t svreinterpret_s64(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x4))) +svint64x4_t svreinterpret_s64(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x4))) +svint64x4_t svreinterpret_s64(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x4))) +svuint64x4_t 
svreinterpret_u64(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x4))) +svuint64x4_t svreinterpret_u64(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x4))) +svuint64x4_t svreinterpret_u64(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x4))) +svuint64x4_t svreinterpret_u64(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x4))) +svuint64x4_t svreinterpret_u64(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x4))) +svuint64x4_t svreinterpret_u64(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x4))) +svuint64x4_t svreinterpret_u64(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x4))) +svuint64x4_t svreinterpret_u64(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x4))) +svuint64x4_t svreinterpret_u64(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x4))) +svuint64x4_t svreinterpret_u64(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x4))) +svuint64x4_t svreinterpret_u64(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x4))) +svuint64x4_t svreinterpret_u64(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x4))) +svfloat16x4_t svreinterpret_f16(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x4))) +svfloat16x4_t svreinterpret_f16(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x4))) +svfloat16x4_t svreinterpret_f16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x4))) +svfloat16x4_t svreinterpret_f16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x4))) +svfloat16x4_t svreinterpret_f16(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x4))) +svfloat16x4_t svreinterpret_f16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x4))) +svfloat16x4_t svreinterpret_f16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x4))) +svfloat16x4_t svreinterpret_f16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x4))) +svfloat16x4_t svreinterpret_f16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x4))) +svfloat16x4_t svreinterpret_f16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x4))) +svfloat16x4_t svreinterpret_f16(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x4))) +svfloat16x4_t svreinterpret_f16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x4))) +svbfloat16x4_t svreinterpret_bf16(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint8x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x4))) +svbfloat16x4_t svreinterpret_bf16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x4))) +svbfloat16x4_t svreinterpret_bf16(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x4))) +svbfloat16x4_t svreinterpret_bf16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x4))) +svbfloat16x4_t svreinterpret_bf16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x4))) +svbfloat16x4_t svreinterpret_bf16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x4))) +svbfloat16x4_t svreinterpret_bf16(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x4))) +svbfloat16x4_t svreinterpret_bf16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x4))) +svfloat32x4_t svreinterpret_f32(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x4))) +svfloat32x4_t svreinterpret_f32(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x4))) +svfloat32x4_t svreinterpret_f32(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x4))) +svfloat32x4_t svreinterpret_f32(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x4))) +svfloat32x4_t svreinterpret_f32(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x4))) +svfloat32x4_t svreinterpret_f32(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x4))) +svfloat32x4_t svreinterpret_f32(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x4))) +svfloat32x4_t svreinterpret_f32(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x4))) +svfloat32x4_t svreinterpret_f32(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x4))) +svfloat32x4_t svreinterpret_f32(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x4))) +svfloat32x4_t svreinterpret_f32(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x4))) +svfloat32x4_t svreinterpret_f32(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x4))) +svfloat64x4_t svreinterpret_f64(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x4))) +svfloat64x4_t svreinterpret_f64(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x4))) +svfloat64x4_t svreinterpret_f64(svint16x4_t op); +__aio 
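The x4 tuple overloads above are pure bitcasts between same-width register groups. A minimal sketch of the intended use, assuming a clang whose arm_sve.h exposes these tuple overloads (the function name and any build flags are illustrative, not part of this header):

    #include <arm_sve.h>

    /* View four float32 vectors as their raw 32-bit lane patterns; no data
       conversion happens, only a reinterpretation of the register group. */
    svuint32x4_t float_lanes_as_bits(svfloat32x4_t v) {
        return svreinterpret_u32(v); /* resolves to __builtin_sve_reinterpret_u32_f32_x4 */
    }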
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x4))) +svfloat64x4_t svreinterpret_f64(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x4))) +svfloat64x4_t svreinterpret_f64(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x4))) +svfloat64x4_t svreinterpret_f64(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x4))) +svfloat64x4_t svreinterpret_f64(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x4))) +svfloat64x4_t svreinterpret_f64(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x4))) +svfloat64x4_t svreinterpret_f64(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x4))) +svfloat64x4_t svreinterpret_f64(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x4))) +svfloat64x4_t svreinterpret_f64(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x4))) +svfloat64x4_t svreinterpret_f64(svfloat64x4_t op); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x2))) +svfloat32x2_t svcvt_f32_f16_x2(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtl_f32_f16_x2))) +svfloat32x2_t svcvtl_f32_f16_x2(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x2))) +svfloat32x2_t svcvt_f32(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtl_f32_f16_x2))) +svfloat32x2_t svcvtl_f32(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x2))) +svuint8x2_t svadd_single_u8_x2(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x2))) +svuint32x2_t svadd_single_u32_x2(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x2))) +svuint64x2_t svadd_single_u64_x2(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x2))) +svuint16x2_t svadd_single_u16_x2(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x2))) +svint8x2_t svadd_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x2))) +svint32x2_t svadd_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x2))) +svint64x2_t svadd_single_s64_x2(svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x2))) +svint16x2_t svadd_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x4))) +svuint8x4_t svadd_single_u8_x4(svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x4))) +svuint32x4_t svadd_single_u32_x4(svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x4))) +svuint64x4_t svadd_single_u64_x4(svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x4))) +svuint16x4_t svadd_single_u16_x4(svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x4))) 
+svint8x4_t svadd_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x4))) +svint32x4_t svadd_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x4))) +svint64x4_t svadd_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x4))) +svint16x4_t svadd_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x2))) +svfloat64x2_t svclamp_single_f64_x2(svfloat64x2_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x2))) +svfloat32x2_t svclamp_single_f32_x2(svfloat32x2_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x2))) +svfloat16x2_t svclamp_single_f16_x2(svfloat16x2_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x2))) +svint8x2_t svclamp_single_s8_x2(svint8x2_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x2))) +svint32x2_t svclamp_single_s32_x2(svint32x2_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x2))) +svint64x2_t svclamp_single_s64_x2(svint64x2_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x2))) +svint16x2_t svclamp_single_s16_x2(svint16x2_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x2))) +svuint8x2_t svclamp_single_u8_x2(svuint8x2_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x2))) +svuint32x2_t svclamp_single_u32_x2(svuint32x2_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x2))) +svuint64x2_t svclamp_single_u64_x2(svuint64x2_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x2))) +svuint16x2_t svclamp_single_u16_x2(svuint16x2_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x4))) +svfloat64x4_t svclamp_single_f64_x4(svfloat64x4_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x4))) +svfloat32x4_t svclamp_single_f32_x4(svfloat32x4_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x4))) +svfloat16x4_t svclamp_single_f16_x4(svfloat16x4_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x4))) +svint8x4_t svclamp_single_s8_x4(svint8x4_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x4))) +svint32x4_t svclamp_single_s32_x4(svint32x4_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x4))) +svint64x4_t svclamp_single_s64_x4(svint64x4_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x4))) +svint16x4_t svclamp_single_s16_x4(svint16x4_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x4))) +svuint8x4_t svclamp_single_u8_x4(svuint8x4_t, svuint8_t, svuint8_t); +__ai 
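The *_single_* forms above take a whole tuple plus one ordinary vector and apply the operation lane-wise to every vector in the tuple. A hedged sketch of the clamp, assuming an SME2-enabled clang (e.g. -march=armv9-a+sme2) and a caller already in streaming SVE mode, hence the __arm_streaming marker; the function name is illustrative only:

    #include <arm_sve.h>

    /* Clamp every lane of both vectors in the pair to the range [lo, hi]. */
    svfloat32x2_t clamp_pair(svfloat32x2_t v, svfloat32_t lo, svfloat32_t hi)
        __arm_streaming {
        return svclamp_single_f32_x2(v, lo, hi);
    }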
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x4))) +svuint32x4_t svclamp_single_u32_x4(svuint32x4_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x4))) +svuint64x4_t svclamp_single_u64_x4(svuint64x4_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x4))) +svuint16x4_t svclamp_single_u16_x4(svuint16x4_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x2))) +svbfloat16_t svcvt_bf16_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x2))) +svfloat16_t svcvt_f16_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x2))) +svint32x2_t svcvt_s32_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x2))) +svuint32x2_t svcvt_u32_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x4))) +svint32x4_t svcvt_s32_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x4))) +svuint32x4_t svcvt_u32_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x2))) +svfloat32x2_t svcvt_f32_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x4))) +svfloat32x4_t svcvt_f32_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x2))) +svfloat32x2_t svcvt_f32_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x4))) +svfloat32x4_t svcvt_f32_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_bf16_f32_x2))) +svbfloat16_t svcvtn_bf16_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_f16_f32_x2))) +svfloat16_t svcvtn_f16_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x2))) +svbfloat16x2_t svmax_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x2))) +svfloat64x2_t svmax_single_f64_x2(svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x2))) +svfloat32x2_t svmax_single_f32_x2(svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x2))) +svfloat16x2_t svmax_single_f16_x2(svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x2))) +svint8x2_t svmax_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x2))) +svint32x2_t svmax_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x2))) +svint64x2_t svmax_single_s64_x2(svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x2))) +svint16x2_t svmax_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x2))) +svuint8x2_t svmax_single_u8_x2(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x2))) +svuint32x2_t svmax_single_u32_x2(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x2))) 
+svuint64x2_t svmax_single_u64_x2(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x2))) +svuint16x2_t svmax_single_u16_x2(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x4))) +svbfloat16x4_t svmax_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x4))) +svfloat64x4_t svmax_single_f64_x4(svfloat64x4_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x4))) +svfloat32x4_t svmax_single_f32_x4(svfloat32x4_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x4))) +svfloat16x4_t svmax_single_f16_x4(svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x4))) +svint8x4_t svmax_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x4))) +svint32x4_t svmax_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x4))) +svint64x4_t svmax_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x4))) +svint16x4_t svmax_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x4))) +svuint8x4_t svmax_single_u8_x4(svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x4))) +svuint32x4_t svmax_single_u32_x4(svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x4))) +svuint64x4_t svmax_single_u64_x4(svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x4))) +svuint16x4_t svmax_single_u16_x4(svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x2))) +svbfloat16x2_t svmax_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x2))) +svfloat64x2_t svmax_f64_x2(svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x2))) +svfloat32x2_t svmax_f32_x2(svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x2))) +svfloat16x2_t svmax_f16_x2(svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x2))) +svint8x2_t svmax_s8_x2(svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x2))) +svint32x2_t svmax_s32_x2(svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x2))) +svint64x2_t svmax_s64_x2(svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x2))) +svint16x2_t svmax_s16_x2(svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x2))) +svuint8x2_t svmax_u8_x2(svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x2))) +svuint32x2_t svmax_u32_x2(svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x2))) +svuint64x2_t svmax_u64_x2(svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x2))) +svuint16x2_t svmax_u16_x2(svuint16x2_t, svuint16x2_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x4))) +svbfloat16x4_t svmax_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x4))) +svfloat64x4_t svmax_f64_x4(svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x4))) +svfloat32x4_t svmax_f32_x4(svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x4))) +svfloat16x4_t svmax_f16_x4(svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x4))) +svint8x4_t svmax_s8_x4(svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x4))) +svint32x4_t svmax_s32_x4(svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x4))) +svint64x4_t svmax_s64_x4(svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x4))) +svint16x4_t svmax_s16_x4(svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x4))) +svuint8x4_t svmax_u8_x4(svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x4))) +svuint32x4_t svmax_u32_x4(svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x4))) +svuint64x4_t svmax_u64_x4(svuint64x4_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x4))) +svuint16x4_t svmax_u16_x4(svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x2))) +svbfloat16x2_t svmaxnm_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x2))) +svfloat64x2_t svmaxnm_single_f64_x2(svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x2))) +svfloat32x2_t svmaxnm_single_f32_x2(svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x2))) +svfloat16x2_t svmaxnm_single_f16_x2(svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x4))) +svbfloat16x4_t svmaxnm_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x4))) +svfloat64x4_t svmaxnm_single_f64_x4(svfloat64x4_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x4))) +svfloat32x4_t svmaxnm_single_f32_x4(svfloat32x4_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x4))) +svfloat16x4_t svmaxnm_single_f16_x4(svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x2))) +svbfloat16x2_t svmaxnm_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x2))) +svfloat64x2_t svmaxnm_f64_x2(svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x2))) +svfloat32x2_t svmaxnm_f32_x2(svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x2))) +svfloat16x2_t svmaxnm_f16_x2(svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x4))) +svbfloat16x4_t svmaxnm_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai 
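svmax and svmaxnm follow the same pattern: the _single variants broadcast one vector operand across the tuple, while the plain variants take two tuples. A sketch under the same SME2/streaming assumptions as the clamp example above:

    #include <arm_sve.h>

    /* ReLU-style lower bound: max(acc[i], 0) for both vectors of the pair. */
    svint32x2_t relu_pair(svint32x2_t acc) __arm_streaming {
        return svmax_single_s32_x2(acc, svdup_s32(0));
    }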
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x4))) +svfloat64x4_t svmaxnm_f64_x4(svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x4))) +svfloat32x4_t svmaxnm_f32_x4(svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x4))) +svfloat16x4_t svmaxnm_f16_x4(svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x2))) +svbfloat16x2_t svmin_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x2))) +svfloat64x2_t svmin_single_f64_x2(svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x2))) +svfloat32x2_t svmin_single_f32_x2(svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x2))) +svfloat16x2_t svmin_single_f16_x2(svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x2))) +svint8x2_t svmin_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x2))) +svint32x2_t svmin_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x2))) +svint64x2_t svmin_single_s64_x2(svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x2))) +svint16x2_t svmin_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x2))) +svuint8x2_t svmin_single_u8_x2(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x2))) +svuint32x2_t svmin_single_u32_x2(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x2))) +svuint64x2_t svmin_single_u64_x2(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x2))) +svuint16x2_t svmin_single_u16_x2(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x4))) +svbfloat16x4_t svmin_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x4))) +svfloat64x4_t svmin_single_f64_x4(svfloat64x4_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x4))) +svfloat32x4_t svmin_single_f32_x4(svfloat32x4_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x4))) +svfloat16x4_t svmin_single_f16_x4(svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x4))) +svint8x4_t svmin_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x4))) +svint32x4_t svmin_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x4))) +svint64x4_t svmin_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x4))) +svint16x4_t svmin_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x4))) +svuint8x4_t svmin_single_u8_x4(svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x4))) +svuint32x4_t 
svmin_single_u32_x4(svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x4))) +svuint64x4_t svmin_single_u64_x4(svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x4))) +svuint16x4_t svmin_single_u16_x4(svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x2))) +svbfloat16x2_t svmin_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x2))) +svfloat64x2_t svmin_f64_x2(svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x2))) +svfloat32x2_t svmin_f32_x2(svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x2))) +svfloat16x2_t svmin_f16_x2(svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x2))) +svint8x2_t svmin_s8_x2(svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x2))) +svint32x2_t svmin_s32_x2(svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x2))) +svint64x2_t svmin_s64_x2(svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x2))) +svint16x2_t svmin_s16_x2(svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x2))) +svuint8x2_t svmin_u8_x2(svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x2))) +svuint32x2_t svmin_u32_x2(svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x2))) +svuint64x2_t svmin_u64_x2(svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x2))) +svuint16x2_t svmin_u16_x2(svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x4))) +svbfloat16x4_t svmin_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x4))) +svfloat64x4_t svmin_f64_x4(svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x4))) +svfloat32x4_t svmin_f32_x4(svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x4))) +svfloat16x4_t svmin_f16_x4(svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x4))) +svint8x4_t svmin_s8_x4(svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x4))) +svint32x4_t svmin_s32_x4(svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x4))) +svint64x4_t svmin_s64_x4(svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x4))) +svint16x4_t svmin_s16_x4(svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x4))) +svuint8x4_t svmin_u8_x4(svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x4))) +svuint32x4_t svmin_u32_x4(svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x4))) +svuint64x4_t svmin_u64_x4(svuint64x4_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x4))) +svuint16x4_t svmin_u16_x4(svuint16x4_t, svuint16x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x2))) +svbfloat16x2_t svminnm_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x2))) +svfloat64x2_t svminnm_single_f64_x2(svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x2))) +svfloat32x2_t svminnm_single_f32_x2(svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x2))) +svfloat16x2_t svminnm_single_f16_x2(svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x4))) +svbfloat16x4_t svminnm_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x4))) +svfloat64x4_t svminnm_single_f64_x4(svfloat64x4_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x4))) +svfloat32x4_t svminnm_single_f32_x4(svfloat32x4_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x4))) +svfloat16x4_t svminnm_single_f16_x4(svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x2))) +svbfloat16x2_t svminnm_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x2))) +svfloat64x2_t svminnm_f64_x2(svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x2))) +svfloat32x2_t svminnm_f32_x2(svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x2))) +svfloat16x2_t svminnm_f16_x2(svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x4))) +svbfloat16x4_t svminnm_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x4))) +svfloat64x4_t svminnm_f64_x4(svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x4))) +svfloat32x4_t svminnm_f32_x4(svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x4))) +svfloat16x4_t svminnm_f16_x4(svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s32_x2))) +svint16_t svqcvt_s16_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s64_x4))) +svint16_t svqcvt_s16_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s8_s32_x4))) +svint8_t svqcvt_s8_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s32_x2))) +svuint16_t svqcvt_u16_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u32_x2))) +svuint16_t svqcvt_u16_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s64_x4))) +svuint16_t svqcvt_u16_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u64_x4))) +svuint16_t svqcvt_u16_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_s32_x4))) +svuint8_t svqcvt_u8_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_u32_x4))) +svuint8_t svqcvt_u8_u32_x4(svuint32x4_t); +__ai 
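The svqcvt intrinsics collapse a whole tuple into a single narrower vector with saturation. A sketch, again assuming SME2 support and streaming mode (the function name is an illustrative placeholder):

    #include <arm_sve.h>

    /* Saturating narrow: two int32 vectors in, one packed int16 vector out. */
    svint16_t narrow_pair(svint32x2_t wide) __arm_streaming {
        return svqcvt_s16_s32_x2(wide);
    }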
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s64_x4))) +svint16_t svqcvtn_s16_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s8_s32_x4))) +svint8_t svqcvtn_s8_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s64_x4))) +svuint16_t svqcvtn_u16_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u64_x4))) +svuint16_t svqcvtn_u16_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_s32_x4))) +svuint8_t svqcvtn_u8_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_u32_x4))) +svuint8_t svqcvtn_u8_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x2))) +svint8x2_t svqdmulh_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x2))) +svint32x2_t svqdmulh_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x2))) +svint64x2_t svqdmulh_single_s64_x2(svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x2))) +svint16x2_t svqdmulh_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x4))) +svint8x4_t svqdmulh_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x4))) +svint32x4_t svqdmulh_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x4))) +svint64x4_t svqdmulh_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x4))) +svint16x4_t svqdmulh_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x2))) +svint8x2_t svqdmulh_s8_x2(svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x2))) +svint32x2_t svqdmulh_s32_x2(svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x2))) +svint64x2_t svqdmulh_s64_x2(svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x2))) +svint16x2_t svqdmulh_s16_x2(svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x4))) +svint8x4_t svqdmulh_s8_x4(svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x4))) +svint32x4_t svqdmulh_s32_x4(svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x4))) +svint64x4_t svqdmulh_s64_x4(svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x4))) +svint16x4_t svqdmulh_s16_x4(svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s32_x2))) +svint16_t svqrshr_n_s16_s32_x2(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u32_x2))) +svuint16_t svqrshr_n_u16_u32_x2(svuint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s8_s32_x4))) +svint8_t svqrshr_n_s8_s32_x4(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s64_x4))) +svint16_t 
svqrshr_n_s16_s64_x4(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u8_u32_x4))) +svuint8_t svqrshr_n_u8_u32_x4(svuint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u64_x4))) +svuint16_t svqrshr_n_u16_u64_x4(svuint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s8_s32_x4))) +svint8_t svqrshrn_n_s8_s32_x4(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s64_x4))) +svint16_t svqrshrn_n_s16_s64_x4(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u8_u32_x4))) +svuint8_t svqrshrn_n_u8_u32_x4(svuint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u64_x4))) +svuint16_t svqrshrn_n_u16_u64_x4(svuint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s32_x2))) +svuint16_t svqrshru_n_u16_s32_x2(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u8_s32_x4))) +svuint8_t svqrshru_n_u8_s32_x4(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s64_x4))) +svuint16_t svqrshru_n_u16_s64_x4(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u8_s32_x4))) +svuint8_t svqrshrun_n_u8_s32_x4(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s64_x4))) +svuint16_t svqrshrun_n_u16_s64_x4(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x2))) +svfloat32x2_t svrinta_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x4))) +svfloat32x4_t svrinta_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x2))) +svfloat32x2_t svrintm_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x4))) +svfloat32x4_t svrintm_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x2))) +svfloat32x2_t svrintn_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x4))) +svfloat32x4_t svrintn_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x2))) +svfloat32x2_t svrintp_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x4))) +svfloat32x4_t svrintp_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x2))) +svint8x2_t svrshl_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x2))) +svint32x2_t svrshl_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x2))) +svint64x2_t svrshl_single_s64_x2(svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x2))) +svint16x2_t svrshl_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x2))) +svuint8x2_t svrshl_single_u8_x2(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x2))) +svuint32x2_t svrshl_single_u32_x2(svuint32x2_t, svuint32_t); +__ai 
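The _n variants above take the shift amount as a uint64_t that must be a compile-time constant in range for the destination element width. A sketch of the rounding shift-and-narrow, with the fixed-point scale of 2^8 assumed purely for illustration:

    #include <arm_sve.h>

    /* Rounding shift right by 8 with saturation, narrowing the pair to int16. */
    svint16_t descale_pair(svint32x2_t acc) __arm_streaming {
        return svqrshr_n_s16_s32_x2(acc, 8);
    }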
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x2))) +svuint64x2_t svrshl_single_u64_x2(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x2))) +svuint16x2_t svrshl_single_u16_x2(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x4))) +svint8x4_t svrshl_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x4))) +svint32x4_t svrshl_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x4))) +svint64x4_t svrshl_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x4))) +svint16x4_t svrshl_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x4))) +svuint8x4_t svrshl_single_u8_x4(svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x4))) +svuint32x4_t svrshl_single_u32_x4(svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x4))) +svuint64x4_t svrshl_single_u64_x4(svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x4))) +svuint16x4_t svrshl_single_u16_x4(svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x2))) +svint8x2_t svrshl_s8_x2(svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x2))) +svint32x2_t svrshl_s32_x2(svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x2))) +svint64x2_t svrshl_s64_x2(svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x2))) +svint16x2_t svrshl_s16_x2(svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x2))) +svuint8x2_t svrshl_u8_x2(svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x2))) +svuint32x2_t svrshl_u32_x2(svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x2))) +svuint64x2_t svrshl_u64_x2(svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x2))) +svuint16x2_t svrshl_u16_x2(svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x4))) +svint8x4_t svrshl_s8_x4(svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x4))) +svint32x4_t svrshl_s32_x4(svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x4))) +svint64x4_t svrshl_s64_x4(svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x4))) +svint16x4_t svrshl_s16_x4(svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x4))) +svuint8x4_t svrshl_u8_x4(svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x4))) +svuint32x4_t svrshl_u32_x4(svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x4))) +svuint64x4_t svrshl_u64_x4(svuint64x4_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x4))) +svuint16x4_t svrshl_u16_x4(svuint16x4_t, 
svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x2))) +svuint8x2_t svsel_u8_x2(svcount_t, svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x2))) +svuint32x2_t svsel_u32_x2(svcount_t, svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x2))) +svuint64x2_t svsel_u64_x2(svcount_t, svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x2))) +svuint16x2_t svsel_u16_x2(svcount_t, svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x2))) +svbfloat16x2_t svsel_bf16_x2(svcount_t, svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x2))) +svint8x2_t svsel_s8_x2(svcount_t, svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x2))) +svfloat64x2_t svsel_f64_x2(svcount_t, svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x2))) +svfloat32x2_t svsel_f32_x2(svcount_t, svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x2))) +svfloat16x2_t svsel_f16_x2(svcount_t, svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x2))) +svint32x2_t svsel_s32_x2(svcount_t, svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x2))) +svint64x2_t svsel_s64_x2(svcount_t, svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x2))) +svint16x2_t svsel_s16_x2(svcount_t, svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x4))) +svuint8x4_t svsel_u8_x4(svcount_t, svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x4))) +svuint32x4_t svsel_u32_x4(svcount_t, svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x4))) +svuint64x4_t svsel_u64_x4(svcount_t, svuint64x4_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x4))) +svuint16x4_t svsel_u16_x4(svcount_t, svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x4))) +svbfloat16x4_t svsel_bf16_x4(svcount_t, svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x4))) +svint8x4_t svsel_s8_x4(svcount_t, svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x4))) +svfloat64x4_t svsel_f64_x4(svcount_t, svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x4))) +svfloat32x4_t svsel_f32_x4(svcount_t, svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x4))) +svfloat16x4_t svsel_f16_x4(svcount_t, svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x4))) +svint32x4_t svsel_s32_x4(svcount_t, svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x4))) +svint64x4_t svsel_s64_x4(svcount_t, svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x4))) +svint16x4_t svsel_s16_x4(svcount_t, svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x2))) 
+svint32x2_t svunpk_s32_s16_x2(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x2))) +svint64x2_t svunpk_s64_s32_x2(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x2))) +svint16x2_t svunpk_s16_s8_x2(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x2))) +svuint32x2_t svunpk_u32_u16_x2(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x2))) +svuint64x2_t svunpk_u64_u32_x2(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x2))) +svuint16x2_t svunpk_u16_u8_x2(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x4))) +svint32x4_t svunpk_s32_s16_x4(svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x4))) +svint64x4_t svunpk_s64_s32_x4(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x4))) +svint16x4_t svunpk_s16_s8_x4(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x4))) +svuint32x4_t svunpk_u32_u16_x4(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x4))) +svuint64x4_t svunpk_u64_u32_x4(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x4))) +svuint16x4_t svunpk_u16_u8_x4(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x2))) +svuint8x2_t svuzp_u8_x2(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x2))) +svuint32x2_t svuzp_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x2))) +svuint64x2_t svuzp_u64_x2(svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x2))) +svuint16x2_t svuzp_u16_x2(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x2))) +svbfloat16x2_t svuzp_bf16_x2(svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x2))) +svint8x2_t svuzp_s8_x2(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x2))) +svfloat64x2_t svuzp_f64_x2(svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x2))) +svfloat32x2_t svuzp_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x2))) +svfloat16x2_t svuzp_f16_x2(svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x2))) +svint32x2_t svuzp_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x2))) +svint64x2_t svuzp_s64_x2(svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x2))) +svint16x2_t svuzp_s16_x2(svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x4))) +svuint8x4_t svuzp_u8_x4(svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x4))) +svuint32x4_t svuzp_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x4))) +svuint64x4_t svuzp_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x4))) +svuint16x4_t svuzp_u16_x4(svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x4))) +svbfloat16x4_t svuzp_bf16_x4(svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x4))) +svint8x4_t 
svuzp_s8_x4(svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x4))) +svfloat64x4_t svuzp_f64_x4(svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x4))) +svfloat32x4_t svuzp_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x4))) +svfloat16x4_t svuzp_f16_x4(svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x4))) +svint32x4_t svuzp_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x4))) +svint64x4_t svuzp_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x4))) +svint16x4_t svuzp_s16_x4(svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x2))) +svuint8x2_t svuzpq_u8_x2(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x2))) +svuint32x2_t svuzpq_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x2))) +svuint64x2_t svuzpq_u64_x2(svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x2))) +svuint16x2_t svuzpq_u16_x2(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x2))) +svbfloat16x2_t svuzpq_bf16_x2(svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x2))) +svint8x2_t svuzpq_s8_x2(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x2))) +svfloat64x2_t svuzpq_f64_x2(svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x2))) +svfloat32x2_t svuzpq_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x2))) +svfloat16x2_t svuzpq_f16_x2(svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x2))) +svint32x2_t svuzpq_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x2))) +svint64x2_t svuzpq_s64_x2(svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x2))) +svint16x2_t svuzpq_s16_x2(svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x4))) +svuint8x4_t svuzpq_u8_x4(svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x4))) +svuint32x4_t svuzpq_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x4))) +svuint64x4_t svuzpq_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x4))) +svuint16x4_t svuzpq_u16_x4(svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x4))) +svbfloat16x4_t svuzpq_bf16_x4(svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x4))) +svint8x4_t svuzpq_s8_x4(svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x4))) +svfloat64x4_t svuzpq_f64_x4(svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x4))) +svfloat32x4_t svuzpq_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x4))) +svfloat16x4_t svuzpq_f16_x4(svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x4))) +svint32x4_t svuzpq_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x4))) +svint64x4_t svuzpq_s64_x4(svint64x4_t); +__ai 
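svzip interleaves lanes across the vectors of a tuple and svuzp reverses it, so the two compose to an identity. A sketch under the same assumptions as the examples above; svcreate2_f32 is the standard ACLE tuple constructor:

    #include <arm_sve.h>

    /* Pack two vectors into a pair and interleave their lanes element-wise. */
    svfloat32x2_t interleave(svfloat32_t even, svfloat32_t odd) __arm_streaming {
        return svzip_f32_x2(svcreate2_f32(even, odd));
    }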
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x4))) +svint16x4_t svuzpq_s16_x4(svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x2))) +svuint8x2_t svzip_u8_x2(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x2))) +svuint32x2_t svzip_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x2))) +svuint64x2_t svzip_u64_x2(svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x2))) +svuint16x2_t svzip_u16_x2(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x2))) +svbfloat16x2_t svzip_bf16_x2(svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x2))) +svint8x2_t svzip_s8_x2(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x2))) +svfloat64x2_t svzip_f64_x2(svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x2))) +svfloat32x2_t svzip_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x2))) +svfloat16x2_t svzip_f16_x2(svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x2))) +svint32x2_t svzip_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x2))) +svint64x2_t svzip_s64_x2(svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x2))) +svint16x2_t svzip_s16_x2(svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x4))) +svuint8x4_t svzip_u8_x4(svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x4))) +svuint32x4_t svzip_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x4))) +svuint64x4_t svzip_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x4))) +svuint16x4_t svzip_u16_x4(svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x4))) +svbfloat16x4_t svzip_bf16_x4(svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x4))) +svint8x4_t svzip_s8_x4(svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x4))) +svfloat64x4_t svzip_f64_x4(svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x4))) +svfloat32x4_t svzip_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x4))) +svfloat16x4_t svzip_f16_x4(svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x4))) +svint32x4_t svzip_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x4))) +svint64x4_t svzip_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x4))) +svint16x4_t svzip_s16_x4(svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x2))) +svuint8x2_t svzipq_u8_x2(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x2))) +svuint32x2_t svzipq_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x2))) +svuint64x2_t svzipq_u64_x2(svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x2))) +svuint16x2_t svzipq_u16_x2(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x2))) +svbfloat16x2_t 
svzipq_bf16_x2(svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x2))) +svint8x2_t svzipq_s8_x2(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x2))) +svfloat64x2_t svzipq_f64_x2(svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x2))) +svfloat32x2_t svzipq_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x2))) +svfloat16x2_t svzipq_f16_x2(svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x2))) +svint32x2_t svzipq_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x2))) +svint64x2_t svzipq_s64_x2(svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x2))) +svint16x2_t svzipq_s16_x2(svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x4))) +svuint8x4_t svzipq_u8_x4(svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x4))) +svuint32x4_t svzipq_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x4))) +svuint64x4_t svzipq_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x4))) +svuint16x4_t svzipq_u16_x4(svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x4))) +svbfloat16x4_t svzipq_bf16_x4(svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x4))) +svint8x4_t svzipq_s8_x4(svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x4))) +svfloat64x4_t svzipq_f64_x4(svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x4))) +svfloat32x4_t svzipq_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x4))) +svfloat16x4_t svzipq_f16_x4(svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x4))) +svint32x4_t svzipq_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x4))) +svint64x4_t svzipq_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x4))) +svint16x4_t svzipq_s16_x4(svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x2))) +svuint8x2_t svadd(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x2))) +svuint32x2_t svadd(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x2))) +svuint64x2_t svadd(svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x2))) +svuint16x2_t svadd(svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x2))) +svint8x2_t svadd(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x2))) +svint32x2_t svadd(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x2))) +svint64x2_t svadd(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x2))) +svint16x2_t svadd(svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x4))) +svuint8x4_t svadd(svuint8x4_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x4))) +svuint32x4_t svadd(svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x4))) +svuint64x4_t svadd(svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x4))) +svuint16x4_t svadd(svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x4))) +svint8x4_t svadd(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x4))) +svint32x4_t svadd(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x4))) +svint64x4_t svadd(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x4))) +svint16x4_t svadd(svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x2))) +svfloat64x2_t svclamp(svfloat64x2_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x2))) +svfloat32x2_t svclamp(svfloat32x2_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x2))) +svfloat16x2_t svclamp(svfloat16x2_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x2))) +svint8x2_t svclamp(svint8x2_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x2))) +svint32x2_t svclamp(svint32x2_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x2))) +svint64x2_t svclamp(svint64x2_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x2))) +svint16x2_t svclamp(svint16x2_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x2))) +svuint8x2_t svclamp(svuint8x2_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x2))) +svuint32x2_t svclamp(svuint32x2_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x2))) +svuint64x2_t svclamp(svuint64x2_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x2))) +svuint16x2_t svclamp(svuint16x2_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x4))) +svfloat64x4_t svclamp(svfloat64x4_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x4))) +svfloat32x4_t svclamp(svfloat32x4_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x4))) +svfloat16x4_t svclamp(svfloat16x4_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x4))) +svint8x4_t svclamp(svint8x4_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x4))) +svint32x4_t svclamp(svint32x4_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x4))) +svint64x4_t svclamp(svint64x4_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x4))) +svint16x4_t svclamp(svint16x4_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x4))) +svuint8x4_t svclamp(svuint8x4_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x4))) +svuint32x4_t svclamp(svuint32x4_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x4))) +svuint64x4_t svclamp(svuint64x4_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x4))) +svuint16x4_t svclamp(svuint16x4_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x2))) +svbfloat16_t svcvt_bf16(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x2))) +svfloat16_t svcvt_f16(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x2))) +svint32x2_t svcvt_s32(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x2))) +svuint32x2_t svcvt_u32(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x4))) +svint32x4_t svcvt_s32(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x4))) +svuint32x4_t svcvt_u32(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x2))) +svfloat32x2_t svcvt_f32(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x4))) +svfloat32x4_t svcvt_f32(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x2))) +svfloat32x2_t svcvt_f32(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x4))) +svfloat32x4_t svcvt_f32(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_bf16_f32_x2))) +svbfloat16_t svcvtn_bf16(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_f16_f32_x2))) +svfloat16_t svcvtn_f16(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x2))) +svbfloat16x2_t svmax(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x2))) +svfloat64x2_t svmax(svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x2))) +svfloat32x2_t svmax(svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x2))) +svfloat16x2_t svmax(svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x2))) +svint8x2_t svmax(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x2))) +svint32x2_t svmax(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x2))) +svint64x2_t svmax(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x2))) +svint16x2_t svmax(svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x2))) +svuint8x2_t svmax(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x2))) +svuint32x2_t svmax(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x2))) +svuint64x2_t svmax(svuint64x2_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x2))) +svuint16x2_t svmax(svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x4))) +svbfloat16x4_t svmax(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x4))) +svfloat64x4_t svmax(svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x4))) +svfloat32x4_t svmax(svfloat32x4_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x4))) +svfloat16x4_t svmax(svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x4))) +svint8x4_t svmax(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x4))) +svint32x4_t svmax(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x4))) +svint64x4_t svmax(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x4))) +svint16x4_t svmax(svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x4))) +svuint8x4_t svmax(svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x4))) +svuint32x4_t svmax(svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x4))) +svuint64x4_t svmax(svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x4))) +svuint16x4_t svmax(svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x2))) +svbfloat16x2_t svmax(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x2))) +svfloat64x2_t svmax(svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x2))) +svfloat32x2_t svmax(svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x2))) +svfloat16x2_t svmax(svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x2))) +svint8x2_t svmax(svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x2))) +svint32x2_t svmax(svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x2))) +svint64x2_t svmax(svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x2))) +svint16x2_t svmax(svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x2))) +svuint8x2_t svmax(svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x2))) +svuint32x2_t svmax(svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x2))) +svuint64x2_t svmax(svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x2))) +svuint16x2_t svmax(svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x4))) +svbfloat16x4_t svmax(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x4))) +svfloat64x4_t svmax(svfloat64x4_t, svfloat64x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x4))) +svfloat32x4_t svmax(svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x4))) +svfloat16x4_t svmax(svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x4))) +svint8x4_t svmax(svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x4))) +svint32x4_t svmax(svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x4))) +svint64x4_t svmax(svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x4))) +svint16x4_t svmax(svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x4))) +svuint8x4_t svmax(svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x4))) +svuint32x4_t svmax(svuint32x4_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x4))) +svuint64x4_t svmax(svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x4))) +svuint16x4_t svmax(svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x2))) +svbfloat16x2_t svmaxnm(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x2))) +svfloat64x2_t svmaxnm(svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x2))) +svfloat32x2_t svmaxnm(svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x2))) +svfloat16x2_t svmaxnm(svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x4))) +svbfloat16x4_t svmaxnm(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x4))) +svfloat64x4_t svmaxnm(svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x4))) +svfloat32x4_t svmaxnm(svfloat32x4_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x4))) +svfloat16x4_t svmaxnm(svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x2))) +svbfloat16x2_t svmaxnm(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x2))) +svfloat64x2_t svmaxnm(svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x2))) +svfloat32x2_t svmaxnm(svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x2))) +svfloat16x2_t svmaxnm(svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x4))) +svbfloat16x4_t svmaxnm(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x4))) +svfloat64x4_t svmaxnm(svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x4))) +svfloat32x4_t svmaxnm(svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x4))) +svfloat16x4_t svmaxnm(svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x2))) +svbfloat16x2_t 
svmin(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x2))) +svfloat64x2_t svmin(svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x2))) +svfloat32x2_t svmin(svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x2))) +svfloat16x2_t svmin(svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x2))) +svint8x2_t svmin(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x2))) +svint32x2_t svmin(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x2))) +svint64x2_t svmin(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x2))) +svint16x2_t svmin(svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x2))) +svuint8x2_t svmin(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x2))) +svuint32x2_t svmin(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x2))) +svuint64x2_t svmin(svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x2))) +svuint16x2_t svmin(svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x4))) +svbfloat16x4_t svmin(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x4))) +svfloat64x4_t svmin(svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x4))) +svfloat32x4_t svmin(svfloat32x4_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x4))) +svfloat16x4_t svmin(svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x4))) +svint8x4_t svmin(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x4))) +svint32x4_t svmin(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x4))) +svint64x4_t svmin(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x4))) +svint16x4_t svmin(svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x4))) +svuint8x4_t svmin(svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x4))) +svuint32x4_t svmin(svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x4))) +svuint64x4_t svmin(svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x4))) +svuint16x4_t svmin(svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x2))) +svbfloat16x2_t svmin(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x2))) +svfloat64x2_t svmin(svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x2))) +svfloat32x2_t svmin(svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x2))) +svfloat16x2_t 
svmin(svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x2))) +svint8x2_t svmin(svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x2))) +svint32x2_t svmin(svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x2))) +svint64x2_t svmin(svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x2))) +svint16x2_t svmin(svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x2))) +svuint8x2_t svmin(svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x2))) +svuint32x2_t svmin(svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x2))) +svuint64x2_t svmin(svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x2))) +svuint16x2_t svmin(svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x4))) +svbfloat16x4_t svmin(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x4))) +svfloat64x4_t svmin(svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x4))) +svfloat32x4_t svmin(svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x4))) +svfloat16x4_t svmin(svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x4))) +svint8x4_t svmin(svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x4))) +svint32x4_t svmin(svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x4))) +svint64x4_t svmin(svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x4))) +svint16x4_t svmin(svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x4))) +svuint8x4_t svmin(svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x4))) +svuint32x4_t svmin(svuint32x4_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x4))) +svuint64x4_t svmin(svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x4))) +svuint16x4_t svmin(svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x2))) +svbfloat16x2_t svminnm(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x2))) +svfloat64x2_t svminnm(svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x2))) +svfloat32x2_t svminnm(svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x2))) +svfloat16x2_t svminnm(svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x4))) +svbfloat16x4_t svminnm(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x4))) +svfloat64x4_t svminnm(svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x4))) +svfloat32x4_t svminnm(svfloat32x4_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x4))) +svfloat16x4_t svminnm(svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x2))) +svbfloat16x2_t svminnm(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x2))) +svfloat64x2_t svminnm(svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x2))) +svfloat32x2_t svminnm(svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x2))) +svfloat16x2_t svminnm(svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x4))) +svbfloat16x4_t svminnm(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x4))) +svfloat64x4_t svminnm(svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x4))) +svfloat32x4_t svminnm(svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x4))) +svfloat16x4_t svminnm(svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s32_x2))) +svint16_t svqcvt_s16(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s64_x4))) +svint16_t svqcvt_s16(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s8_s32_x4))) +svint8_t svqcvt_s8(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s32_x2))) +svuint16_t svqcvt_u16(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u32_x2))) +svuint16_t svqcvt_u16(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s64_x4))) +svuint16_t svqcvt_u16(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u64_x4))) +svuint16_t svqcvt_u16(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_s32_x4))) +svuint8_t svqcvt_u8(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_u32_x4))) +svuint8_t svqcvt_u8(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s64_x4))) +svint16_t svqcvtn_s16(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s8_s32_x4))) +svint8_t svqcvtn_s8(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s64_x4))) +svuint16_t svqcvtn_u16(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u64_x4))) +svuint16_t svqcvtn_u16(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_s32_x4))) +svuint8_t svqcvtn_u8(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_u32_x4))) +svuint8_t svqcvtn_u8(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x2))) +svint8x2_t svqdmulh(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x2))) +svint32x2_t svqdmulh(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x2))) +svint64x2_t svqdmulh(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x2))) +svint16x2_t svqdmulh(svint16x2_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x4))) +svint8x4_t svqdmulh(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x4))) +svint32x4_t svqdmulh(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x4))) +svint64x4_t svqdmulh(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x4))) +svint16x4_t svqdmulh(svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x2))) +svint8x2_t svqdmulh(svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x2))) +svint32x2_t svqdmulh(svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x2))) +svint64x2_t svqdmulh(svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x2))) +svint16x2_t svqdmulh(svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x4))) +svint8x4_t svqdmulh(svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x4))) +svint32x4_t svqdmulh(svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x4))) +svint64x4_t svqdmulh(svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x4))) +svint16x4_t svqdmulh(svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s32_x2))) +svint16_t svqrshr_s16(svint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u32_x2))) +svuint16_t svqrshr_u16(svuint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s8_s32_x4))) +svint8_t svqrshr_s8(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s64_x4))) +svint16_t svqrshr_s16(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u8_u32_x4))) +svuint8_t svqrshr_u8(svuint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u64_x4))) +svuint16_t svqrshr_u16(svuint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s8_s32_x4))) +svint8_t svqrshrn_s8(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s64_x4))) +svint16_t svqrshrn_s16(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u8_u32_x4))) +svuint8_t svqrshrn_u8(svuint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u64_x4))) +svuint16_t svqrshrn_u16(svuint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s32_x2))) +svuint16_t svqrshru_u16(svint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u8_s32_x4))) +svuint8_t svqrshru_u8(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s64_x4))) +svuint16_t svqrshru_u16(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u8_s32_x4))) +svuint8_t svqrshrun_u8(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s64_x4))) +svuint16_t 
svqrshrun_u16(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x2))) +svfloat32x2_t svrinta(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x4))) +svfloat32x4_t svrinta(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x2))) +svfloat32x2_t svrintm(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x4))) +svfloat32x4_t svrintm(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x2))) +svfloat32x2_t svrintn(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x4))) +svfloat32x4_t svrintn(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x2))) +svfloat32x2_t svrintp(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x4))) +svfloat32x4_t svrintp(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x2))) +svint8x2_t svrshl(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x2))) +svint32x2_t svrshl(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x2))) +svint64x2_t svrshl(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x2))) +svint16x2_t svrshl(svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x2))) +svuint8x2_t svrshl(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x2))) +svuint32x2_t svrshl(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x2))) +svuint64x2_t svrshl(svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x2))) +svuint16x2_t svrshl(svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x4))) +svint8x4_t svrshl(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x4))) +svint32x4_t svrshl(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x4))) +svint64x4_t svrshl(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x4))) +svint16x4_t svrshl(svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x4))) +svuint8x4_t svrshl(svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x4))) +svuint32x4_t svrshl(svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x4))) +svuint64x4_t svrshl(svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x4))) +svuint16x4_t svrshl(svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x2))) +svint8x2_t svrshl(svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x2))) +svint32x2_t svrshl(svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x2))) +svint64x2_t svrshl(svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x2))) 
+svint16x2_t svrshl(svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x2))) +svuint8x2_t svrshl(svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x2))) +svuint32x2_t svrshl(svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x2))) +svuint64x2_t svrshl(svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x2))) +svuint16x2_t svrshl(svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x4))) +svint8x4_t svrshl(svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x4))) +svint32x4_t svrshl(svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x4))) +svint64x4_t svrshl(svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x4))) +svint16x4_t svrshl(svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x4))) +svuint8x4_t svrshl(svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x4))) +svuint32x4_t svrshl(svuint32x4_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x4))) +svuint64x4_t svrshl(svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x4))) +svuint16x4_t svrshl(svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x2))) +svuint8x2_t svsel(svcount_t, svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x2))) +svuint32x2_t svsel(svcount_t, svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x2))) +svuint64x2_t svsel(svcount_t, svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x2))) +svuint16x2_t svsel(svcount_t, svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x2))) +svbfloat16x2_t svsel(svcount_t, svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x2))) +svint8x2_t svsel(svcount_t, svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x2))) +svfloat64x2_t svsel(svcount_t, svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x2))) +svfloat32x2_t svsel(svcount_t, svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x2))) +svfloat16x2_t svsel(svcount_t, svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x2))) +svint32x2_t svsel(svcount_t, svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x2))) +svint64x2_t svsel(svcount_t, svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x2))) +svint16x2_t svsel(svcount_t, svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x4))) +svuint8x4_t svsel(svcount_t, svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x4))) +svuint32x4_t svsel(svcount_t, svuint32x4_t, svuint32x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x4))) +svuint64x4_t svsel(svcount_t, svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x4))) +svuint16x4_t svsel(svcount_t, svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x4))) +svbfloat16x4_t svsel(svcount_t, svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x4))) +svint8x4_t svsel(svcount_t, svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x4))) +svfloat64x4_t svsel(svcount_t, svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x4))) +svfloat32x4_t svsel(svcount_t, svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x4))) +svfloat16x4_t svsel(svcount_t, svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x4))) +svint32x4_t svsel(svcount_t, svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x4))) +svint64x4_t svsel(svcount_t, svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x4))) +svint16x4_t svsel(svcount_t, svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x2))) +svint32x2_t svunpk_s32(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x2))) +svint64x2_t svunpk_s64(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x2))) +svint16x2_t svunpk_s16(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x2))) +svuint32x2_t svunpk_u32(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x2))) +svuint64x2_t svunpk_u64(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x2))) +svuint16x2_t svunpk_u16(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x4))) +svint32x4_t svunpk_s32(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x4))) +svint64x4_t svunpk_s64(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x4))) +svint16x4_t svunpk_s16(svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x4))) +svuint32x4_t svunpk_u32(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x4))) +svuint64x4_t svunpk_u64(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x4))) +svuint16x4_t svunpk_u16(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x2))) +svuint8x2_t svuzp(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x2))) +svuint32x2_t svuzp(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x2))) +svuint64x2_t svuzp(svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x2))) +svuint16x2_t svuzp(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x2))) +svbfloat16x2_t svuzp(svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x2))) +svint8x2_t svuzp(svint8x2_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x2))) +svfloat64x2_t svuzp(svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x2))) +svfloat32x2_t svuzp(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x2))) +svfloat16x2_t svuzp(svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x2))) +svint32x2_t svuzp(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x2))) +svint64x2_t svuzp(svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x2))) +svint16x2_t svuzp(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x4))) +svuint8x4_t svuzp(svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x4))) +svuint32x4_t svuzp(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x4))) +svuint64x4_t svuzp(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x4))) +svuint16x4_t svuzp(svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x4))) +svbfloat16x4_t svuzp(svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x4))) +svint8x4_t svuzp(svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x4))) +svfloat64x4_t svuzp(svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x4))) +svfloat32x4_t svuzp(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x4))) +svfloat16x4_t svuzp(svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x4))) +svint32x4_t svuzp(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x4))) +svint64x4_t svuzp(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x4))) +svint16x4_t svuzp(svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x2))) +svuint8x2_t svuzpq(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x2))) +svuint32x2_t svuzpq(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x2))) +svuint64x2_t svuzpq(svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x2))) +svuint16x2_t svuzpq(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x2))) +svbfloat16x2_t svuzpq(svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x2))) +svint8x2_t svuzpq(svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x2))) +svfloat64x2_t svuzpq(svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x2))) +svfloat32x2_t svuzpq(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x2))) +svfloat16x2_t svuzpq(svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x2))) +svint32x2_t svuzpq(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x2))) +svint64x2_t svuzpq(svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x2))) +svint16x2_t svuzpq(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x4))) +svuint8x4_t svuzpq(svuint8x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x4))) +svuint32x4_t svuzpq(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x4))) +svuint64x4_t svuzpq(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x4))) +svuint16x4_t svuzpq(svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x4))) +svbfloat16x4_t svuzpq(svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x4))) +svint8x4_t svuzpq(svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x4))) +svfloat64x4_t svuzpq(svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x4))) +svfloat32x4_t svuzpq(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x4))) +svfloat16x4_t svuzpq(svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x4))) +svint32x4_t svuzpq(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x4))) +svint64x4_t svuzpq(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x4))) +svint16x4_t svuzpq(svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x2))) +svuint8x2_t svzip(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x2))) +svuint32x2_t svzip(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x2))) +svuint64x2_t svzip(svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x2))) +svuint16x2_t svzip(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x2))) +svbfloat16x2_t svzip(svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x2))) +svint8x2_t svzip(svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x2))) +svfloat64x2_t svzip(svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x2))) +svfloat32x2_t svzip(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x2))) +svfloat16x2_t svzip(svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x2))) +svint32x2_t svzip(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x2))) +svint64x2_t svzip(svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x2))) +svint16x2_t svzip(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x4))) +svuint8x4_t svzip(svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x4))) +svuint32x4_t svzip(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x4))) +svuint64x4_t svzip(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x4))) +svuint16x4_t svzip(svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x4))) +svbfloat16x4_t svzip(svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x4))) +svint8x4_t svzip(svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x4))) +svfloat64x4_t svzip(svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x4))) +svfloat32x4_t svzip(svfloat32x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x4))) +svfloat16x4_t svzip(svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x4))) +svint32x4_t svzip(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x4))) +svint64x4_t svzip(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x4))) +svint16x4_t svzip(svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x2))) +svuint8x2_t svzipq(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x2))) +svuint32x2_t svzipq(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x2))) +svuint64x2_t svzipq(svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x2))) +svuint16x2_t svzipq(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x2))) +svbfloat16x2_t svzipq(svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x2))) +svint8x2_t svzipq(svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x2))) +svfloat64x2_t svzipq(svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x2))) +svfloat32x2_t svzipq(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x2))) +svfloat16x2_t svzipq(svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x2))) +svint32x2_t svzipq(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x2))) +svint64x2_t svzipq(svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x2))) +svint16x2_t svzipq(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x4))) +svuint8x4_t svzipq(svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x4))) +svuint32x4_t svzipq(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x4))) +svuint64x4_t svzipq(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x4))) +svuint16x4_t svzipq(svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x4))) +svbfloat16x4_t svzipq(svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x4))) +svint8x4_t svzipq(svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x4))) +svfloat64x4_t svzipq(svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x4))) +svfloat32x4_t svzipq(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x4))) +svfloat16x4_t svzipq(svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x4))) +svint32x4_t svzipq(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x4))) +svint64x4_t svzipq(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x4))) +svint16x4_t svzipq(svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x2))) +svbfloat16x2_t svclamp_single_bf16_x2(svbfloat16x2_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x4))) +svbfloat16x4_t svclamp_single_bf16_x4(svbfloat16x4_t, svbfloat16_t, svbfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x2))) +svbfloat16x2_t svclamp(svbfloat16x2_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x4))) +svbfloat16x4_t svclamp(svbfloat16x4_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset))) +svuint32_t svadrb_u32base_u32offset(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset))) +svuint64_t svadrb_u64base_u64offset(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset))) +svuint32_t svadrb_u32base_s32offset(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset))) +svuint64_t svadrb_u64base_s64offset(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index))) +svuint32_t svadrd_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index))) +svuint64_t svadrd_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index))) +svuint32_t svadrd_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index))) +svuint64_t svadrd_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index))) +svuint32_t svadrh_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index))) +svuint64_t svadrh_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index))) +svuint32_t svadrh_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index))) +svuint64_t svadrh_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index))) +svuint32_t svadrw_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index))) +svuint64_t svadrw_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index))) +svuint32_t svadrw_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index))) +svuint64_t svadrw_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32))) +svuint32_t svcompact_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64))) +svuint64_t svcompact_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64))) +svfloat64_t svcompact_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32))) +svfloat32_t svcompact_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32))) +svint32_t svcompact_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64))) +svint64_t svcompact_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64))) +svfloat64_t 
svexpa_f64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32))) +svfloat32_t svexpa_f32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16))) +svfloat16_t svexpa_f16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32))) +svuint32_t svld1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64))) +svuint64_t svld1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64))) +svfloat64_t svld1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32))) +svfloat32_t svld1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32))) +svint32_t svld1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64))) +svint64_t svld1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32))) +svuint32_t svld1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64))) +svuint64_t svld1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64))) +svfloat64_t svld1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32))) +svfloat32_t svld1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32))) +svint32_t svld1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64))) +svint64_t svld1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32))) +svuint32_t svld1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64))) +svuint64_t svld1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64))) +svfloat64_t svld1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32))) +svfloat32_t svld1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32))) +svint32_t svld1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64))) +svint64_t svld1_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32))) +svuint32_t svld1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32))) +svfloat32_t svld1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32))) +svint32_t svld1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32))) +svuint32_t svld1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32))) +svfloat32_t svld1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32))) +svint32_t svld1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64))) +svuint64_t svld1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64))) +svfloat64_t svld1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64))) +svint64_t svld1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64))) +svuint64_t svld1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64))) +svfloat64_t svld1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64))) +svint64_t svld1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32))) +svuint32_t svld1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32))) +svfloat32_t svld1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32))) +svint32_t svld1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32))) +svuint32_t svld1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32))) +svfloat32_t svld1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32))) +svint32_t svld1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) +svuint64_t svld1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) +svfloat64_t svld1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) +svint64_t svld1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) +svuint64_t svld1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) +svfloat64_t svld1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) +svint64_t svld1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) +svuint32_t svld1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) +svuint64_t svld1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) +svint32_t svld1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) +svint64_t svld1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) +svuint32_t svld1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) +svuint64_t svld1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) +svint32_t svld1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) +svint64_t svld1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) +svuint32_t svld1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) +svint32_t svld1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) +svuint32_t svld1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) +svint32_t svld1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) +svuint64_t svld1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64))) +svint64_t svld1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64))) +svuint64_t svld1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64))) +svint64_t svld1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32))) +svuint32_t svld1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64))) +svuint64_t svld1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32))) +svint32_t svld1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64))) +svint64_t svld1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32))) +svuint32_t svld1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64))) +svuint64_t svld1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32))) +svint32_t svld1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64))) +svint64_t svld1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32))) +svuint32_t svld1sh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64))) +svuint64_t svld1sh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32))) +svint32_t svld1sh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64))) +svint64_t svld1sh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32))) +svuint32_t svld1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32))) +svint32_t svld1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32))) +svuint32_t svld1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32))) +svint32_t svld1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64))) +svuint64_t svld1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64))) +svint64_t svld1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64))) +svuint64_t svld1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64))) +svint64_t svld1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32))) +svuint32_t svld1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32))) +svint32_t svld1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32))) +svuint32_t svld1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32))) +svint32_t svld1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64))) +svuint64_t svld1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64))) +svint64_t svld1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) +svuint64_t svld1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) +svint64_t svld1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) +svuint64_t svld1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) +svint64_t svld1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) +svuint64_t svld1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) +svint64_t svld1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) +svuint64_t svld1sw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) +svint64_t svld1sw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) +svuint64_t svld1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) +svint64_t svld1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) +svuint64_t svld1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) +svint64_t svld1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) +svuint64_t svld1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) +svint64_t svld1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) +svuint64_t svld1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) +svint64_t svld1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) +svuint32_t svld1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) +svuint64_t svld1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) +svint32_t svld1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) +svint64_t 
svld1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) +svuint32_t svld1ub_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) +svuint64_t svld1ub_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) +svint32_t svld1ub_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) +svint64_t svld1ub_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) +svuint32_t svld1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) +svint32_t svld1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) +svuint32_t svld1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) +svint32_t svld1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) +svuint64_t svld1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) +svint64_t svld1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) +svuint64_t svld1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) +svint64_t svld1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) +svuint32_t svld1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) +svuint64_t svld1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) +svint32_t svld1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) +svint64_t svld1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) +svuint32_t svld1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) +svuint64_t svld1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) +svint32_t svld1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) +svint64_t svld1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) 
+svuint32_t svld1uh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) +svuint64_t svld1uh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) +svint32_t svld1uh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) +svint64_t svld1uh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) +svuint32_t svld1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) +svint32_t svld1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) +svuint32_t svld1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) +svint32_t svld1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) +svuint64_t svld1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) +svint64_t svld1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) +svuint64_t svld1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) +svint64_t svld1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) +svuint32_t svld1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) +svint32_t svld1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) +svuint32_t svld1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) +svint32_t svld1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) +svuint64_t svld1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) +svint64_t svld1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) +svuint64_t svld1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) +svint64_t svld1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) +svuint64_t svld1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) +svint64_t svld1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) +svuint64_t svld1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) +svint64_t svld1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) +svuint64_t svld1uw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) +svint64_t svld1uw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) +svuint64_t svld1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) +svint64_t svld1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) +svuint64_t svld1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) +svint64_t svld1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) +svuint64_t svld1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) +svint64_t svld1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) +svuint64_t svld1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) +svint64_t svld1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) +svuint8_t svldff1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) +svuint32_t svldff1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) +svuint64_t svldff1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) +svuint16_t svldff1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) +svint8_t svldff1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) +svfloat64_t svldff1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) +svfloat32_t svldff1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) +svfloat16_t svldff1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) +svint32_t svldff1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) +svint64_t svldff1_s64(svbool_t, int64_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) +svint16_t svldff1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) +svuint32_t svldff1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) +svuint64_t svldff1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) +svfloat64_t svldff1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) +svfloat32_t svldff1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) +svint32_t svldff1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) +svint64_t svldff1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) +svuint32_t svldff1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) +svuint64_t svldff1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) +svfloat64_t svldff1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) +svfloat32_t svldff1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) +svint32_t svldff1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) +svint64_t svldff1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) +svuint32_t svldff1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) +svuint64_t svldff1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) +svfloat64_t svldff1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) +svfloat32_t svldff1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) +svint32_t svldff1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) +svint64_t svldff1_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) +svuint32_t svldff1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) +svfloat32_t svldff1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) +svint32_t svldff1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) +svuint32_t svldff1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) +svfloat32_t svldff1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) +svint32_t svldff1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) +svuint64_t svldff1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) +svfloat64_t svldff1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) +svint64_t svldff1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) +svuint64_t svldff1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) +svfloat64_t svldff1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) +svint64_t svldff1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) +svuint32_t svldff1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) +svfloat32_t svldff1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) +svint32_t svldff1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) +svuint32_t svldff1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) +svfloat32_t svldff1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) +svint32_t svldff1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) +svuint64_t svldff1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) +svfloat64_t svldff1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) +svint64_t svldff1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) +svuint64_t svldff1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) 
+svfloat64_t svldff1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) +svint64_t svldff1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) +svuint8_t svldff1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) +svuint32_t svldff1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) +svuint64_t svldff1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) +svuint16_t svldff1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) +svint8_t svldff1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) +svfloat64_t svldff1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) +svfloat32_t svldff1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) +svfloat16_t svldff1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) +svint32_t svldff1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) +svint64_t svldff1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) +svint16_t svldff1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) +svuint32_t svldff1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) +svuint64_t svldff1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) +svint32_t svldff1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) +svint64_t svldff1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) +svuint32_t svldff1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) +svuint64_t svldff1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) +svint32_t svldff1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) +svint64_t svldff1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) +svuint32_t svldff1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) +svint32_t svldff1sb_gather_s32offset_s32(svbool_t, int8_t const 
*, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) +svuint32_t svldff1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) +svint32_t svldff1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) +svuint64_t svldff1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) +svint64_t svldff1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) +svuint64_t svldff1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) +svint64_t svldff1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u32))) +svuint32_t svldff1sb_vnum_u32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u64))) +svuint64_t svldff1sb_vnum_u64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u16))) +svuint16_t svldff1sb_vnum_u16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s32))) +svint32_t svldff1sb_vnum_s32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s64))) +svint64_t svldff1sb_vnum_s64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s16))) +svint16_t svldff1sb_vnum_s16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u32))) +svuint32_t svldff1sb_u32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u64))) +svuint64_t svldff1sb_u64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u16))) +svuint16_t svldff1sb_u16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s32))) +svint32_t svldff1sb_s32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s64))) +svint64_t svldff1sb_s64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s16))) +svint16_t svldff1sb_s16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) +svuint32_t svldff1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) +svuint64_t svldff1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) +svint32_t svldff1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) +svint64_t svldff1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) +svuint32_t svldff1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) +svuint64_t svldff1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) +svint32_t svldff1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) +svint64_t svldff1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) +svuint32_t svldff1sh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) +svuint64_t svldff1sh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) +svint32_t svldff1sh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) +svint64_t svldff1sh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) +svuint32_t svldff1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) +svint32_t svldff1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) +svuint32_t svldff1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) +svint32_t svldff1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) +svuint64_t svldff1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) +svint64_t svldff1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) +svuint64_t svldff1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) +svint64_t svldff1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) +svuint32_t svldff1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) +svint32_t svldff1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) +svuint32_t svldff1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) +svint32_t svldff1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) +svuint64_t 
svldff1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) +svint64_t svldff1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) +svuint64_t svldff1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) +svint64_t svldff1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u32))) +svuint32_t svldff1sh_vnum_u32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u64))) +svuint64_t svldff1sh_vnum_u64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s32))) +svint32_t svldff1sh_vnum_s32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s64))) +svint64_t svldff1sh_vnum_s64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u32))) +svuint32_t svldff1sh_u32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u64))) +svuint64_t svldff1sh_u64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s32))) +svint32_t svldff1sh_s32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s64))) +svint64_t svldff1sh_s64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) +svuint64_t svldff1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) +svint64_t svldff1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) +svuint64_t svldff1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) +svint64_t svldff1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) +svuint64_t svldff1sw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) +svint64_t svldff1sw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64))) +svuint64_t svldff1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64))) +svint64_t svldff1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64))) +svuint64_t svldff1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64))) +svint64_t svldff1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64))) 
+svuint64_t svldff1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64))) +svint64_t svldff1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64))) +svuint64_t svldff1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64))) +svint64_t svldff1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_u64))) +svuint64_t svldff1sw_vnum_u64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_s64))) +svint64_t svldff1sw_vnum_s64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_u64))) +svuint64_t svldff1sw_u64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_s64))) +svint64_t svldff1sw_s64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32))) +svuint32_t svldff1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64))) +svuint64_t svldff1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32))) +svint32_t svldff1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64))) +svint64_t svldff1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32))) +svuint32_t svldff1ub_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64))) +svuint64_t svldff1ub_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32))) +svint32_t svldff1ub_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64))) +svint64_t svldff1ub_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32))) +svuint32_t svldff1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32))) +svint32_t svldff1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32))) +svuint32_t svldff1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32))) +svint32_t svldff1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64))) +svuint64_t svldff1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64))) +svint64_t svldff1ub_gather_s64offset_s64(svbool_t, 
uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64))) +svuint64_t svldff1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64))) +svint64_t svldff1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u32))) +svuint32_t svldff1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u64))) +svuint64_t svldff1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u16))) +svuint16_t svldff1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s32))) +svint32_t svldff1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s64))) +svint64_t svldff1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s16))) +svint16_t svldff1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u32))) +svuint32_t svldff1ub_u32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u64))) +svuint64_t svldff1ub_u64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u16))) +svuint16_t svldff1ub_u16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s32))) +svint32_t svldff1ub_s32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s64))) +svint64_t svldff1ub_s64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s16))) +svint16_t svldff1ub_s16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32))) +svuint32_t svldff1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64))) +svuint64_t svldff1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32))) +svint32_t svldff1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64))) +svint64_t svldff1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32))) +svuint32_t svldff1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64))) +svuint64_t svldff1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32))) +svint32_t svldff1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64))) +svint64_t svldff1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32))) +svuint32_t svldff1uh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64))) +svuint64_t svldff1uh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32))) +svint32_t svldff1uh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64))) +svint64_t svldff1uh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32))) +svuint32_t svldff1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32))) +svint32_t svldff1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32))) +svuint32_t svldff1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32))) +svint32_t svldff1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64))) +svuint64_t svldff1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64))) +svint64_t svldff1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64))) +svuint64_t svldff1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64))) +svint64_t svldff1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32))) +svuint32_t svldff1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32))) +svint32_t svldff1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32))) +svuint32_t svldff1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32))) +svint32_t svldff1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64))) +svuint64_t svldff1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64))) +svint64_t svldff1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64))) +svuint64_t svldff1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64))) +svint64_t svldff1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u32))) +svuint32_t 
svldff1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u64))) +svuint64_t svldff1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s32))) +svint32_t svldff1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s64))) +svint64_t svldff1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u32))) +svuint32_t svldff1uh_u32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u64))) +svuint64_t svldff1uh_u64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s32))) +svint32_t svldff1uh_s32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s64))) +svint64_t svldff1uh_s64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64))) +svuint64_t svldff1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64))) +svint64_t svldff1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64))) +svuint64_t svldff1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64))) +svint64_t svldff1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64))) +svuint64_t svldff1uw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64))) +svint64_t svldff1uw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64))) +svuint64_t svldff1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64))) +svint64_t svldff1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64))) +svuint64_t svldff1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64))) +svint64_t svldff1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64))) +svuint64_t svldff1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64))) +svint64_t svldff1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64))) +svuint64_t svldff1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64))) +svint64_t svldff1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_u64))) +svuint64_t svldff1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_s64))) +svint64_t svldff1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_u64))) +svuint64_t svldff1uw_u64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_s64))) +svint64_t svldff1uw_s64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8))) +svuint8_t svldnf1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32))) +svuint32_t svldnf1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64))) +svuint64_t svldnf1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16))) +svuint16_t svldnf1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8))) +svint8_t svldnf1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64))) +svfloat64_t svldnf1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32))) +svfloat32_t svldnf1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16))) +svfloat16_t svldnf1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32))) +svint32_t svldnf1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) +svint64_t svldnf1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) +svint16_t svldnf1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) +svuint8_t svldnf1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) +svuint32_t svldnf1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) +svuint64_t svldnf1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) +svuint16_t svldnf1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) +svint8_t svldnf1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) +svfloat64_t svldnf1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) +svfloat32_t svldnf1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) +svfloat16_t svldnf1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) +svint32_t svldnf1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) +svint64_t svldnf1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) +svint16_t 
svldnf1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u32))) +svuint32_t svldnf1sb_vnum_u32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u64))) +svuint64_t svldnf1sb_vnum_u64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u16))) +svuint16_t svldnf1sb_vnum_u16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s32))) +svint32_t svldnf1sb_vnum_s32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s64))) +svint64_t svldnf1sb_vnum_s64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s16))) +svint16_t svldnf1sb_vnum_s16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u32))) +svuint32_t svldnf1sb_u32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u64))) +svuint64_t svldnf1sb_u64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u16))) +svuint16_t svldnf1sb_u16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s32))) +svint32_t svldnf1sb_s32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s64))) +svint64_t svldnf1sb_s64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s16))) +svint16_t svldnf1sb_s16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u32))) +svuint32_t svldnf1sh_vnum_u32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u64))) +svuint64_t svldnf1sh_vnum_u64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s32))) +svint32_t svldnf1sh_vnum_s32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s64))) +svint64_t svldnf1sh_vnum_s64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u32))) +svuint32_t svldnf1sh_u32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u64))) +svuint64_t svldnf1sh_u64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s32))) +svint32_t svldnf1sh_s32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s64))) +svint64_t svldnf1sh_s64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_u64))) +svuint64_t svldnf1sw_vnum_u64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_s64))) +svint64_t svldnf1sw_vnum_s64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_u64))) +svuint64_t svldnf1sw_u64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_s64))) +svint64_t svldnf1sw_s64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u32))) +svuint32_t svldnf1ub_vnum_u32(svbool_t, 
uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u64))) +svuint64_t svldnf1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u16))) +svuint16_t svldnf1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s32))) +svint32_t svldnf1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s64))) +svint64_t svldnf1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s16))) +svint16_t svldnf1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u32))) +svuint32_t svldnf1ub_u32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u64))) +svuint64_t svldnf1ub_u64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u16))) +svuint16_t svldnf1ub_u16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s32))) +svint32_t svldnf1ub_s32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s64))) +svint64_t svldnf1ub_s64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s16))) +svint16_t svldnf1ub_s16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u32))) +svuint32_t svldnf1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u64))) +svuint64_t svldnf1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s32))) +svint32_t svldnf1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s64))) +svint64_t svldnf1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u32))) +svuint32_t svldnf1uh_u32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u64))) +svuint64_t svldnf1uh_u64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s32))) +svint32_t svldnf1uh_s32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s64))) +svint64_t svldnf1uh_s64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_u64))) +svuint64_t svldnf1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_s64))) +svint64_t svldnf1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_u64))) +svuint64_t svldnf1uw_u64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_s64))) +svint64_t svldnf1uw_s64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) +void svprfb_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) +void svprfb_gather_u64base(svbool_t, 
svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) +void svprfb_gather_u32base_offset(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) +void svprfb_gather_u64base_offset(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) +void svprfb_gather_s32offset(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) +void svprfb_gather_u32offset(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) +void svprfb_gather_s64offset(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) +void svprfb_gather_u64offset(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) +void svprfd_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) +void svprfd_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) +void svprfd_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) +void svprfd_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) +void svprfd_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) +void svprfd_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) +void svprfd_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) +void svprfd_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) +void svprfh_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) +void svprfh_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) +void svprfh_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) +void svprfh_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) +void svprfh_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) +void svprfh_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) +void svprfh_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) +void svprfh_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base))) +void svprfw_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base))) +void svprfw_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index))) +void svprfw_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index))) +void svprfw_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index))) +void svprfw_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index))) +void svprfw_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index))) +void svprfw_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index))) +void svprfw_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr))) +svbool_t svrdffr(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z))) +svbool_t svrdffr_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr))) +void svsetffr(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32))) +void svst1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64))) +void svst1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64))) +void svst1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32))) +void svst1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32))) +void svst1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64))) +void svst1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32))) +void svst1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64))) +void svst1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64))) +void svst1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32))) +void svst1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, 
svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32))) +void svst1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64))) +void svst1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32))) +void svst1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64))) +void svst1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64))) +void svst1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32))) +void svst1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32))) +void svst1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64))) +void svst1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32))) +void svst1_scatter_s32index_u32(svbool_t, uint32_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32))) +void svst1_scatter_s32index_f32(svbool_t, float32_t *, svint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32))) +void svst1_scatter_s32index_s32(svbool_t, int32_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32))) +void svst1_scatter_u32index_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32))) +void svst1_scatter_u32index_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32))) +void svst1_scatter_u32index_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64))) +void svst1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64))) +void svst1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64))) +void svst1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64))) +void svst1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64))) +void svst1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64))) +void svst1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32))) +void svst1_scatter_s32offset_u32(svbool_t, uint32_t *, svint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32))) +void svst1_scatter_s32offset_f32(svbool_t, float32_t *, svint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32))) +void svst1_scatter_s32offset_s32(svbool_t, int32_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32))) +void svst1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32))) +void svst1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32))) +void svst1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64))) +void svst1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64))) +void svst1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64))) +void svst1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64))) +void svst1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64))) +void svst1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64))) +void svst1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32))) +void svst1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64))) +void svst1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32))) +void svst1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64))) +void svst1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32))) +void svst1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64))) +void svst1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32))) +void svst1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64))) +void svst1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32))) +void svst1b_scatter_s32offset_s32(svbool_t, int8_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32))) +void 
svst1b_scatter_s32offset_u32(svbool_t, uint8_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32))) +void svst1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32))) +void svst1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64))) +void svst1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64))) +void svst1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64))) +void svst1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64))) +void svst1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32))) +void svst1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64))) +void svst1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32))) +void svst1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64))) +void svst1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32))) +void svst1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64))) +void svst1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32))) +void svst1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64))) +void svst1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32))) +void svst1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64))) +void svst1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32))) +void svst1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64))) +void svst1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32))) +void svst1h_scatter_s32index_s32(svbool_t, int16_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32))) +void svst1h_scatter_s32index_u32(svbool_t, uint16_t *, svint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32))) +void svst1h_scatter_u32index_s32(svbool_t, int16_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32))) +void svst1h_scatter_u32index_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64))) +void svst1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64))) +void svst1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64))) +void svst1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64))) +void svst1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32))) +void svst1h_scatter_s32offset_s32(svbool_t, int16_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32))) +void svst1h_scatter_s32offset_u32(svbool_t, uint16_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32))) +void svst1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32))) +void svst1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64))) +void svst1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64))) +void svst1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64))) +void svst1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64))) +void svst1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64))) +void svst1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64))) +void svst1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64))) +void svst1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64))) +void svst1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64))) +void svst1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64))) +void svst1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64))) +void 
svst1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64))) +void svst1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64))) +void svst1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64))) +void svst1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64))) +void svst1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64))) +void svst1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64))) +void svst1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64))) +void svst1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64))) +svfloat64_t svtmad_f64(svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32))) +svfloat32_t svtmad_f32(svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16))) +svfloat16_t svtmad_f16(svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64))) +svfloat64_t svtsmul_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32))) +svfloat32_t svtsmul_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16))) +svfloat16_t svtsmul_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64))) +svfloat64_t svtssel_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32))) +svfloat32_t svtssel_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16))) +svfloat16_t svtssel_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwrffr))) +void svwrffr(svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset))) +svuint32_t svadrb_offset(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset))) +svuint64_t svadrb_offset(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset))) +svuint32_t svadrb_offset(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset))) +svuint64_t svadrb_offset(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index))) +svuint32_t svadrd_index(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index))) +svuint64_t svadrd_index(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index))) +svuint32_t svadrd_index(svuint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index))) +svuint64_t svadrd_index(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index))) +svuint32_t svadrh_index(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index))) +svuint64_t svadrh_index(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index))) +svuint32_t svadrh_index(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index))) +svuint64_t svadrh_index(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index))) +svuint32_t svadrw_index(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index))) +svuint64_t svadrw_index(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index))) +svuint32_t svadrw_index(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index))) +svuint64_t svadrw_index(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32))) +svuint32_t svcompact(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64))) +svuint64_t svcompact(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64))) +svfloat64_t svcompact(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32))) +svfloat32_t svcompact(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32))) +svint32_t svcompact(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64))) +svint64_t svcompact(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64))) +svfloat64_t svexpa(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32))) +svfloat32_t svexpa(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16))) +svfloat16_t svexpa(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32))) +svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64))) +svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64))) +svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32))) +svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32))) +svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64))) +svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32))) +svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64))) 
+svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64))) +svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32))) +svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32))) +svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64))) +svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32))) +svuint32_t svld1_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64))) +svuint64_t svld1_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64))) +svfloat64_t svld1_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32))) +svfloat32_t svld1_gather_f32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32))) +svint32_t svld1_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64))) +svint64_t svld1_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32))) +svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32))) +svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32))) +svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32))) +svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32))) +svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32))) +svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64))) +svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64))) +svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64))) +svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64))) +svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64))) +svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64))) +svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32))) +svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32))) +svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32))) +svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32))) +svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32))) +svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32))) +svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) +svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) +svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) +svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) +svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) +svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) +svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) +svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) +svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) +svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) +svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) +svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) +svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) +svint32_t svld1sb_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) +svint64_t svld1sb_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) +svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) +svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, 
svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) +svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) +svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) +svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64))) +svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64))) +svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64))) +svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32))) +svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64))) +svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32))) +svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64))) +svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32))) +svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64))) +svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32))) +svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64))) +svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32))) +svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64))) +svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32))) +svint32_t svld1sh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64))) +svint64_t svld1sh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32))) +svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32))) +svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32))) +svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32))) 
+svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64))) +svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64))) +svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64))) +svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64))) +svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32))) +svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32))) +svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32))) +svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32))) +svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64))) +svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64))) +svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) +svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) +svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) +svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) +svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) +svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) +svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) +svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) +svint64_t svld1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) +svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) +svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) +svuint64_t 
svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) +svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) +svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) +svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) +svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) +svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) +svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) +svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) +svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) +svint64_t svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) +svuint32_t svld1ub_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) +svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) +svint32_t svld1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) +svint64_t svld1ub_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) +svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) +svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) +svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) +svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) +svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) +svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) +svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) +svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) +svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) +svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) +svint32_t svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) +svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) +svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) +svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) +svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) +svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) +svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) +svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) +svint32_t svld1uh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) +svint64_t svld1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) +svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) +svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) +svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) +svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) +svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) +svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) +svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) +svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) +svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) +svint32_t 
svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) +svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) +svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) +svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) +svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) +svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) +svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) +svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) +svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) +svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) +svint64_t svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) +svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) +svint64_t svld1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) +svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) +svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) +svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) +svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) +svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) +svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) +svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) +svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) +svuint8_t svldff1(svbool_t, uint8_t const *); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) +svuint32_t svldff1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) +svuint64_t svldff1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) +svuint16_t svldff1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) +svint8_t svldff1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) +svfloat64_t svldff1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) +svfloat32_t svldff1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) +svfloat16_t svldff1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) +svint32_t svldff1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) +svint64_t svldff1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) +svint16_t svldff1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) +svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) +svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) +svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) +svfloat32_t svldff1_gather_index_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) +svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) +svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) +svuint32_t svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) +svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) +svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) +svfloat32_t svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) +svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) +svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) +svuint32_t svldff1_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) +svuint64_t svldff1_gather_u64(svbool_t, 
svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) +svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) +svfloat32_t svldff1_gather_f32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) +svint32_t svldff1_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) +svint64_t svldff1_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) +svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) +svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) +svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) +svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) +svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) +svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) +svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) +svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) +svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) +svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) +svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) +svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) +svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) +svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) +svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) +svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) +svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) +svint32_t 
svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) +svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) +svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) +svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) +svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) +svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) +svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) +svuint8_t svldff1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) +svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) +svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) +svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) +svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) +svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) +svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) +svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) +svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) +svint64_t svldff1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) +svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) +svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) +svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) +svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) +svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) +svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) +svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) +svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) +svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) +svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) +svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) +svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) +svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) +svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) +svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) +svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) +svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) +svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) +svuint64_t svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) +svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) +svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) +svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) +svuint64_t svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) +svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) +svint64_t svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) +svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) +svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) +svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) +svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) +svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) +svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) +svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) +svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) +svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) +svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) +svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) +svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) +svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) +svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) +svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) +svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) +svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) +svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) +svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) +svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) +svuint64_t svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) +svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) +svuint64_t 
svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) +svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) +svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) +svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64))) +svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64))) +svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64))) +svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64))) +svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64))) +svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64))) +svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64))) +svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64))) +svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32))) +svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64))) +svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32))) +svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64))) +svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32))) +svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64))) +svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32))) +svint32_t svldff1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64))) +svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32))) +svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32))) +svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, 
svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32))) +svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32))) +svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64))) +svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64))) +svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64))) +svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64))) +svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32))) +svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64))) +svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32))) +svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64))) +svint64_t svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32))) +svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64))) +svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32))) +svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64))) +svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32))) +svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64))) +svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32))) +svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64))) +svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32))) +svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32))) +svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32))) +svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32))) +svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64))) +svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64))) +svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64))) +svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64))) +svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32))) +svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32))) +svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32))) +svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32))) +svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64))) +svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64))) +svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64))) +svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64))) +svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64))) +svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64))) +svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64))) +svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64))) +svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64))) +svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64))) +svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64))) +svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64))) +svint64_t 
svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64))) +svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64))) +svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64))) +svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64))) +svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64))) +svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64))) +svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8))) +svuint8_t svldnf1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32))) +svuint32_t svldnf1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64))) +svuint64_t svldnf1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16))) +svuint16_t svldnf1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8))) +svint8_t svldnf1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64))) +svfloat64_t svldnf1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32))) +svfloat32_t svldnf1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16))) +svfloat16_t svldnf1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32))) +svint32_t svldnf1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) +svint64_t svldnf1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) +svint16_t svldnf1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) +svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) +svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) +svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) +svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) +svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) +svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) +svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) +svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) +svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) +svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) +svint16_t svldnf1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) +void svprfb_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) +void svprfb_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) +void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) +void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) +void svprfb_gather_offset(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) +void svprfb_gather_offset(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) +void svprfb_gather_offset(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) +void svprfb_gather_offset(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) +void svprfd_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) +void svprfd_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) +void svprfd_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) +void svprfd_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) +void svprfd_gather_index(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) +void svprfd_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) +void svprfd_gather_index(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) +void svprfd_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) +void svprfh_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) +void svprfh_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) +void svprfh_gather_index(svbool_t, 
svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) +void svprfh_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) +void svprfh_gather_index(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) +void svprfh_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) +void svprfh_gather_index(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) +void svprfh_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base))) +void svprfw_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base))) +void svprfw_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index))) +void svprfw_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index))) +void svprfw_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index))) +void svprfw_gather_index(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index))) +void svprfw_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index))) +void svprfw_gather_index(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index))) +void svprfw_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32))) +void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64))) +void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64))) +void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32))) +void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32))) +void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64))) +void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32))) +void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64))) +void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64))) +void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32))) +void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32))) +void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64))) +void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32))) +void svst1_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64))) +void svst1_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64))) +void svst1_scatter(svbool_t, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32))) +void svst1_scatter(svbool_t, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32))) +void svst1_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64))) +void svst1_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32))) +void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32))) +void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32))) +void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32))) +void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32))) +void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32))) +void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64))) +void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64))) +void svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64))) +void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64))) +void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64))) +void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64))) +void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32))) +void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32))) +void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32))) +void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32))) +void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32))) +void svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32))) +void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64))) +void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64))) +void svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64))) +void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64))) +void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64))) +void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64))) +void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32))) +void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64))) +void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32))) +void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64))) +void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32))) +void svst1b_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64))) +void svst1b_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32))) +void svst1b_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64))) +void svst1b_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32))) +void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32))) +void 
svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32))) +void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64))) +void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64))) +void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32))) +void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64))) +void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32))) +void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64))) +void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32))) +void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64))) +void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32))) +void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64))) +void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32))) +void svst1h_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64))) +void svst1h_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32))) +void svst1h_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64))) +void svst1h_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32))) +void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32))) +void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32))) +void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32))) +void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64))) +void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64))) +void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64))) +void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64))) +void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32))) +void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32))) +void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64))) +void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64))) +void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64))) +void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64))) +void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64))) +void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64))) +void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64))) +void svst1w_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64))) +void svst1w_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64))) +void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64))) +void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64))) +void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64))) +void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64))) +void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64))) +void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64))) +void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64))) +void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64))) +svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32))) +svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16))) +svfloat16_t svtmad(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64))) +svfloat64_t svtsmul(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32))) +svfloat32_t svtsmul(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16))) +svfloat16_t svtsmul(svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64))) +svfloat64_t svtssel(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32))) +svfloat32_t svtssel(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16))) +svfloat16_t svtssel(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) +svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) +svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) +svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) +svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) +svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) +svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) +svbfloat16_t svldff1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) +svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) +svbfloat16_t svldnf1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) +svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, 
int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) +svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) +svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) +svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) +svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) +svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) +svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) +svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) +svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) +svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) +svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) +svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) +svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) +svfloat32_t svbfdot_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) +svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) +svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) +svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) +svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) +svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) +svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) +svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) +svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) +bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) +svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) +bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) +svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) +svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) +svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) +svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) +svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) +svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) +svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) +svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) +svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) +svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) +svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) +svbfloat16_t svdup_n_bf16(bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) +svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) +svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) +svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) +svbfloat16_t svdup_lane_bf16(svbfloat16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) +svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) +svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) +svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) +svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) +svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) +svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) +svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) +bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) +bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) +svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) 
+svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) +svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) +svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) +svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) +svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) +svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) +svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) +svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) +svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) +svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) +uint64_t svlen_bf16(svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) +svbfloat16_t svrev_bf16(svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) +svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) +svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) +svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) +svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) +svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) +void svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) +void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) +void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) +void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) +void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) +void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) +void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) +void svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) +void svstnt1_bf16(svbool_t, bfloat16_t *, 
svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) +void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) +svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) +svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) +svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16))) +svbfloat16x2_t svundef2_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16))) +svbfloat16x3_t svundef3_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16))) +svbfloat16x4_t svundef4_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16))) +svbfloat16_t svundef_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) +svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) +svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) +svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) +svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) +svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) +svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) +svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) +svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) +svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) +svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) +svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) +svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) +svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) +bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) +svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) +bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) +svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) +svuint16_t svcnt_m(svuint16_t, 
svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) +svuint16_t svcnt_x(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) +svuint16_t svcnt_z(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) +svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) +svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) +svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) +svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) +svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) +svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) +svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) +svbfloat16_t svdup_bf16(bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) +svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) +svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) +svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) +svbfloat16_t svdup_lane(svbfloat16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) +svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) +svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) +svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) +svbfloat16_t svget2(svbfloat16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) +svbfloat16_t svget3(svbfloat16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) +svbfloat16_t svget4(svbfloat16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) +svbfloat16_t svinsr(svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) +bfloat16_t svlasta(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) +bfloat16_t svlastb(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) +svbfloat16_t svld1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) +svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) +svbfloat16_t svld1rq(svbool_t, bfloat16_t const *); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) +svbfloat16x2_t svld2(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) +svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) +svbfloat16x3_t svld3(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) +svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) +svbfloat16x4_t svld4(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) +svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) +svbfloat16_t svldnt1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) +svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) +uint64_t svlen(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) +svbfloat16_t svrev(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) +svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) +svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) +svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) +svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) +svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) +void svst1(svbool_t, bfloat16_t *, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) +void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) +void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) +void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) +void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) +void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) +void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) +void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) +void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) +void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) +svbfloat16_t svtbl(svbfloat16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) +svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) +svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) +svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) +svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) +svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) +svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) +svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) +svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) +svuint8_t svld1ro_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) +svuint32_t svld1ro_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) +svuint64_t svld1ro_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) +svuint16_t svld1ro_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) +svint8_t svld1ro_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) +svfloat64_t svld1ro_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) +svfloat32_t svld1ro_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) +svfloat16_t svld1ro_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) +svint32_t svld1ro_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) +svint64_t svld1ro_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) +svint16_t svld1ro_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) +svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) +svuint8_t svtrn1q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) +svuint32_t svtrn1q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) +svuint64_t svtrn1q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) +svuint16_t svtrn1q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) +svint8_t svtrn1q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) +svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) +svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) +svfloat16_t 
svtrn1q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) +svint32_t svtrn1q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) +svint64_t svtrn1q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) +svint16_t svtrn1q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) +svuint8_t svtrn2q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) +svuint32_t svtrn2q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) +svuint64_t svtrn2q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) +svuint16_t svtrn2q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) +svint8_t svtrn2q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) +svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) +svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) +svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) +svint32_t svtrn2q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) +svint64_t svtrn2q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) +svint16_t svtrn2q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) +svuint8_t svuzp1q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) +svuint32_t svuzp1q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) +svuint64_t svuzp1q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) +svuint16_t svuzp1q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) +svint8_t svuzp1q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) +svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) +svfloat32_t svuzp1q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) +svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) +svint32_t svuzp1q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) +svint64_t svuzp1q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) +svint16_t svuzp1q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) +svuint8_t svuzp2q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) +svuint32_t svuzp2q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) +svuint64_t svuzp2q_u64(svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) +svuint16_t svuzp2q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) +svint8_t svuzp2q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) +svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) +svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) +svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) +svint32_t svuzp2q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) +svint64_t svuzp2q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) +svint16_t svuzp2q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) +svuint8_t svzip1q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) +svuint32_t svzip1q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) +svuint64_t svzip1q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) +svuint16_t svzip1q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) +svint8_t svzip1q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) +svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) +svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) +svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) +svint32_t svzip1q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) +svint64_t svzip1q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) +svint16_t svzip1q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) +svuint8_t svzip2q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) +svuint32_t svzip2q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) +svuint64_t svzip2q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) +svuint16_t svzip2q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) +svint8_t svzip2q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) +svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) +svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) +svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) +svint32_t svzip2q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) +svint64_t 
svzip2q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) +svint16_t svzip2q_s16(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) +svuint8_t svld1ro(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) +svuint32_t svld1ro(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) +svuint64_t svld1ro(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) +svuint16_t svld1ro(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) +svint8_t svld1ro(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) +svfloat64_t svld1ro(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) +svfloat32_t svld1ro(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) +svfloat16_t svld1ro(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) +svint32_t svld1ro(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) +svint64_t svld1ro(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) +svint16_t svld1ro(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) +svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) +svuint8_t svtrn1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) +svuint32_t svtrn1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) +svuint64_t svtrn1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) +svuint16_t svtrn1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) +svint8_t svtrn1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) +svfloat64_t svtrn1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) +svfloat32_t svtrn1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) +svfloat16_t svtrn1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) +svint32_t svtrn1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) +svint64_t svtrn1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) +svint16_t svtrn1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) +svuint8_t svtrn2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) +svuint32_t svtrn2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) +svuint64_t svtrn2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) +svuint16_t svtrn2q(svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) +svint8_t svtrn2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) +svfloat64_t svtrn2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) +svfloat32_t svtrn2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) +svfloat16_t svtrn2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) +svint32_t svtrn2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) +svint64_t svtrn2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) +svint16_t svtrn2q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) +svuint8_t svuzp1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) +svuint32_t svuzp1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) +svuint64_t svuzp1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) +svuint16_t svuzp1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) +svint8_t svuzp1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) +svfloat64_t svuzp1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) +svfloat32_t svuzp1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) +svfloat16_t svuzp1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) +svint32_t svuzp1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) +svint64_t svuzp1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) +svint16_t svuzp1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) +svuint8_t svuzp2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) +svuint32_t svuzp2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) +svuint64_t svuzp2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) +svuint16_t svuzp2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) +svint8_t svuzp2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) +svfloat64_t svuzp2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) +svfloat32_t svuzp2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) +svfloat16_t svuzp2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) +svint32_t svuzp2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) +svint64_t svuzp2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) +svint16_t svuzp2q(svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) +svuint8_t svzip1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) +svuint32_t svzip1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) +svuint64_t svzip1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) +svuint16_t svzip1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) +svint8_t svzip1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) +svfloat64_t svzip1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) +svfloat32_t svzip1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) +svfloat16_t svzip1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) +svint32_t svzip1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) +svint64_t svzip1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) +svint16_t svzip1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) +svuint8_t svzip2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) +svuint32_t svzip2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) +svuint64_t svzip2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) +svuint16_t svzip2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) +svint8_t svzip2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) +svfloat64_t svzip2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) +svfloat32_t svzip2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) +svfloat16_t svzip2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) +svint32_t svzip2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) +svint64_t svzip2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) +svint16_t svzip2q(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) +svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) +svbfloat16_t svld1ro(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) +svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) +svuint32_t svmmla_u32(svuint32_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) +svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) +svint32_t svmmla(svint32_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) +svuint32_t svmmla(svuint32_t, 
svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) +svint32_t svusmmla(svint32_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) +svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) +svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) +svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) +svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) +svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) +svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) +svint32_t svsudot(svint32_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) +svint32_t svsudot(svint32_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) +svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) +svint32_t svusdot(svint32_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) +svint32_t svusdot(svint32_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) +svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z))) +svuint32_t svhistcnt_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z))) +svuint64_t svhistcnt_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z))) +svuint32_t svhistcnt_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z))) +svuint64_t svhistcnt_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8))) +svuint8_t svhistseg_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8))) +svuint8_t svhistseg_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32))) +svuint32_t svldnt1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64))) +svuint64_t svldnt1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64))) +svfloat64_t svldnt1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32))) +svfloat32_t svldnt1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32))) +svint32_t svldnt1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64))) +svint64_t svldnt1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32))) +svuint32_t svldnt1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64))) +svuint64_t svldnt1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64))) +svfloat64_t svldnt1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32))) +svfloat32_t svldnt1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32))) +svint32_t svldnt1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64))) +svint64_t svldnt1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32))) +svuint32_t svldnt1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64))) +svuint64_t svldnt1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64))) +svfloat64_t svldnt1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32))) +svfloat32_t svldnt1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32))) +svint32_t svldnt1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64))) +svint64_t svldnt1_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64))) +svuint64_t svldnt1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64))) +svfloat64_t svldnt1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64))) +svint64_t svldnt1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64))) +svuint64_t svldnt1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64))) +svfloat64_t svldnt1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64))) +svint64_t svldnt1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32))) +svuint32_t svldnt1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32))) +svfloat32_t svldnt1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32))) +svint32_t svldnt1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64))) +svuint64_t svldnt1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64))) +svfloat64_t svldnt1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64))) +svint64_t svldnt1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64))) +svuint64_t svldnt1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64))) +svfloat64_t svldnt1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64))) +svint64_t svldnt1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32))) +svuint32_t svldnt1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64))) +svuint64_t svldnt1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32))) +svint32_t svldnt1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64))) +svint64_t svldnt1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32))) +svuint32_t svldnt1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64))) +svuint64_t svldnt1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32))) +svint32_t svldnt1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64))) +svint64_t svldnt1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32))) +svuint32_t svldnt1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32))) +svint32_t svldnt1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64))) +svuint64_t svldnt1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64))) +svint64_t svldnt1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64))) +svuint64_t svldnt1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64))) +svint64_t 
svldnt1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32))) +svuint32_t svldnt1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64))) +svuint64_t svldnt1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32))) +svint32_t svldnt1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64))) +svint64_t svldnt1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32))) +svuint32_t svldnt1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64))) +svuint64_t svldnt1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32))) +svint32_t svldnt1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64))) +svint64_t svldnt1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32))) +svuint32_t svldnt1sh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64))) +svuint64_t svldnt1sh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32))) +svint32_t svldnt1sh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64))) +svint64_t svldnt1sh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64))) +svuint64_t svldnt1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64))) +svint64_t svldnt1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64))) +svuint64_t svldnt1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64))) +svint64_t svldnt1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32))) +svuint32_t svldnt1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32))) +svint32_t svldnt1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64))) +svuint64_t svldnt1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64))) +svint64_t svldnt1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64))) +svuint64_t svldnt1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64))) +svint64_t svldnt1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64))) +svuint64_t svldnt1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64))) +svint64_t svldnt1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64))) +svuint64_t svldnt1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64))) +svint64_t svldnt1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64))) +svuint64_t svldnt1sw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64))) +svint64_t svldnt1sw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64))) +svuint64_t svldnt1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64))) +svint64_t svldnt1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64))) +svuint64_t svldnt1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64))) +svint64_t svldnt1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64))) +svuint64_t svldnt1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64))) +svint64_t svldnt1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64))) +svuint64_t svldnt1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64))) +svint64_t svldnt1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32))) +svuint32_t svldnt1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64))) +svuint64_t svldnt1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32))) +svint32_t svldnt1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64))) +svint64_t svldnt1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32))) +svuint32_t svldnt1ub_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64))) +svuint64_t svldnt1ub_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32))) +svint32_t svldnt1ub_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64))) +svint64_t svldnt1ub_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32))) +svuint32_t svldnt1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32))) +svint32_t svldnt1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64))) +svuint64_t svldnt1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64))) +svint64_t svldnt1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64))) +svuint64_t svldnt1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64))) +svint64_t svldnt1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32))) +svuint32_t svldnt1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64))) +svuint64_t svldnt1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32))) +svint32_t svldnt1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64))) +svint64_t svldnt1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32))) +svuint32_t svldnt1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64))) +svuint64_t svldnt1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32))) +svint32_t svldnt1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64))) +svint64_t svldnt1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32))) +svuint32_t svldnt1uh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64))) +svuint64_t svldnt1uh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32))) +svint32_t 
svldnt1uh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64))) +svint64_t svldnt1uh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64))) +svuint64_t svldnt1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64))) +svint64_t svldnt1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64))) +svuint64_t svldnt1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64))) +svint64_t svldnt1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32))) +svuint32_t svldnt1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32))) +svint32_t svldnt1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64))) +svuint64_t svldnt1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64))) +svint64_t svldnt1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64))) +svuint64_t svldnt1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64))) +svint64_t svldnt1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64))) +svuint64_t svldnt1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64))) +svint64_t svldnt1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64))) +svuint64_t svldnt1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64))) +svint64_t svldnt1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64))) +svuint64_t svldnt1uw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64))) +svint64_t svldnt1uw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64))) +svuint64_t svldnt1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64))) +svint64_t svldnt1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64))) +svuint64_t svldnt1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64))) +svint64_t svldnt1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64))) +svuint64_t svldnt1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64))) +svint64_t svldnt1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64))) +svuint64_t svldnt1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64))) +svint64_t svldnt1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8))) +svbool_t svmatch_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16))) +svbool_t svmatch_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8))) +svbool_t svmatch_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16))) +svbool_t svmatch_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8))) +svbool_t svnmatch_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16))) +svbool_t svnmatch_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8))) +svbool_t svnmatch_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16))) +svbool_t svnmatch_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32))) +void svstnt1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64))) +void svstnt1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64))) +void svstnt1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32))) +void svstnt1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32))) +void svstnt1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64))) +void svstnt1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32))) +void svstnt1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64))) +void svstnt1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64))) +void svstnt1_scatter_u64base_offset_f64(svbool_t, svuint64_t, 
int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32))) +void svstnt1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32))) +void svstnt1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64))) +void svstnt1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32))) +void svstnt1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64))) +void svstnt1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64))) +void svstnt1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32))) +void svstnt1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32))) +void svstnt1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64))) +void svstnt1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64))) +void svstnt1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64))) +void svstnt1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64))) +void svstnt1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64))) +void svstnt1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64))) +void svstnt1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64))) +void svstnt1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32))) +void svstnt1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32))) +void svstnt1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32))) +void svstnt1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64))) +void svstnt1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64))) +void svstnt1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64))) +void svstnt1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64))) +void svstnt1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64))) +void svstnt1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64))) +void svstnt1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32))) +void svstnt1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64))) +void svstnt1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32))) +void svstnt1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64))) +void svstnt1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32))) +void svstnt1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64))) +void svstnt1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32))) +void svstnt1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64))) +void svstnt1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32))) +void svstnt1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32))) +void svstnt1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64))) +void svstnt1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64))) +void svstnt1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64))) +void svstnt1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64))) +void svstnt1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32))) +void svstnt1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64))) +void svstnt1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32))) +void svstnt1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64))) +void svstnt1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32))) +void svstnt1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64))) +void svstnt1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32))) +void svstnt1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64))) +void svstnt1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32))) +void svstnt1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64))) +void svstnt1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32))) +void svstnt1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64))) +void svstnt1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64))) +void svstnt1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64))) +void svstnt1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64))) +void svstnt1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64))) +void svstnt1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32))) +void svstnt1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32))) +void svstnt1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64))) +void svstnt1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64))) +void svstnt1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64))) +void svstnt1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64))) +void svstnt1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64))) +void svstnt1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64))) +void svstnt1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64))) +void svstnt1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64))) +void svstnt1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64))) +void svstnt1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64))) +void svstnt1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64))) +void svstnt1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64))) +void svstnt1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64))) +void svstnt1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64))) +void svstnt1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64))) +void svstnt1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64))) +void svstnt1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64))) +void svstnt1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64))) +void svstnt1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z))) +svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z))) +svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z))) +svuint32_t svhistcnt_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z))) +svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8))) +svuint8_t svhistseg(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8))) +svuint8_t svhistseg(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32))) +svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64))) +svuint64_t 
svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64))) +svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32))) +svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32))) +svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64))) +svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32))) +svuint32_t svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64))) +svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64))) +svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32))) +svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32))) +svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64))) +svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32))) +svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64))) +svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64))) +svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32))) +svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32))) +svint32_t svldnt1_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64))) +svint64_t svldnt1_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64))) +svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64))) +svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64))) +svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64))) +svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64))) +svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64))) +svint64_t 
svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32))) +svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32))) +svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32))) +svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64))) +svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64))) +svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64))) +svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64))) +svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64))) +svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64))) +svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32))) +svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64))) +svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32))) +svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64))) +svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32))) +svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64))) +svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32))) +svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64))) +svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32))) +svuint32_t svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32))) +svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64))) +svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64))) +svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, 
svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64))) +svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64))) +svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32))) +svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32))) +svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32))) +svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32))) +svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32))) +svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64))) +svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32))) +svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64))) +svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32))) +svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32))) +svint32_t svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64))) +svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64))) +svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32))) +svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32))) +svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64))) +svint64_t 
svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32))) +svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64))) +svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32))) +svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64))) +svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32))) +svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32))) +svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32))) +svuint32_t svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32))) +svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32))) +svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32))) +svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32))) +svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64))) +svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32))) +svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64))) +svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32))) +svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32))) +svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64))) +svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64))) +svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t 
const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8))) +svbool_t svmatch(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16))) +svbool_t svmatch(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8))) +svbool_t svmatch(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16))) +svbool_t svmatch(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8))) +svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16))) +svbool_t svnmatch(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8))) +svbool_t svnmatch(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16))) +svbool_t svnmatch(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32))) +void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64))) +void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64))) +void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32))) +void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32))) +void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64))) +void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32))) +void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64))) +void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64))) +void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32))) +void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32))) +void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64))) +void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32))) +void svstnt1_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64))) +void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64))) +void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32))) +void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32))) +void svstnt1_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64))) +void svstnt1_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64))) +void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64))) +void svstnt1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64))) +void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64))) +void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64))) +void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64))) +void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32))) +void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32))) +void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32))) +void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64))) +void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64))) +void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64))) +void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64))) +void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64))) +void 
svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64))) +void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32))) +void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64))) +void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32))) +void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64))) +void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32))) +void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64))) +void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32))) +void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64))) +void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32))) +void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32))) +void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64))) +void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64))) +void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64))) +void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64))) +void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32))) +void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64))) +void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32))) +void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64))) +void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32))) +void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64))) +void 
svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32))) +void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64))) +void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32))) +void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64))) +void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32))) +void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64))) +void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64))) +void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64))) +void svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64))) +void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64))) +void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32))) +void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32))) +void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64))) +void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64))) +void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64))) +void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64))) +void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64))) +void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64))) +void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64))) +void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64))) +void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64))) +void svstnt1w_scatter(svbool_t, 
svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64))) +void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64))) +void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64))) +void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64))) +void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64))) +void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64))) +void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64))) +void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64))) +void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64))) +void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m))) +svbfloat16_t svadd_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x))) +svbfloat16_t svadd_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z))) +svbfloat16_t svadd_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m))) +svbfloat16_t svadd_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x))) +svbfloat16_t svadd_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z))) +svbfloat16_t svadd_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16))) +svbfloat16_t svclamp_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m))) +svbfloat16_t svmax_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x))) +svbfloat16_t svmax_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z))) +svbfloat16_t svmax_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m))) +svbfloat16_t svmax_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x))) +svbfloat16_t svmax_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z))) +svbfloat16_t svmax_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m))) +svbfloat16_t svmaxnm_n_bf16_m(svbool_t, svbfloat16_t, 
bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x))) +svbfloat16_t svmaxnm_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z))) +svbfloat16_t svmaxnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m))) +svbfloat16_t svmaxnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x))) +svbfloat16_t svmaxnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z))) +svbfloat16_t svmaxnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m))) +svbfloat16_t svmin_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x))) +svbfloat16_t svmin_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z))) +svbfloat16_t svmin_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m))) +svbfloat16_t svmin_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x))) +svbfloat16_t svmin_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z))) +svbfloat16_t svmin_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m))) +svbfloat16_t svminnm_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x))) +svbfloat16_t svminnm_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z))) +svbfloat16_t svminnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m))) +svbfloat16_t svminnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x))) +svbfloat16_t svminnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z))) +svbfloat16_t svminnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m))) +svbfloat16_t svmla_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x))) +svbfloat16_t svmla_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z))) +svbfloat16_t svmla_n_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m))) +svbfloat16_t svmla_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x))) +svbfloat16_t svmla_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z))) +svbfloat16_t svmla_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16))) +svbfloat16_t 
svmla_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m))) +svbfloat16_t svmls_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x))) +svbfloat16_t svmls_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z))) +svbfloat16_t svmls_n_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m))) +svbfloat16_t svmls_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x))) +svbfloat16_t svmls_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z))) +svbfloat16_t svmls_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16))) +svbfloat16_t svmls_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m))) +svbfloat16_t svmul_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x))) +svbfloat16_t svmul_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z))) +svbfloat16_t svmul_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m))) +svbfloat16_t svmul_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x))) +svbfloat16_t svmul_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z))) +svbfloat16_t svmul_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16))) +svbfloat16_t svmul_lane_bf16(svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m))) +svbfloat16_t svsub_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x))) +svbfloat16_t svsub_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z))) +svbfloat16_t svsub_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m))) +svbfloat16_t svsub_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x))) +svbfloat16_t svsub_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z))) +svbfloat16_t svsub_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m))) +svbfloat16_t svadd_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x))) +svbfloat16_t svadd_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z))) +svbfloat16_t svadd_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m))) 
+svbfloat16_t svadd_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x))) +svbfloat16_t svadd_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z))) +svbfloat16_t svadd_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16))) +svbfloat16_t svclamp(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m))) +svbfloat16_t svmax_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x))) +svbfloat16_t svmax_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z))) +svbfloat16_t svmax_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m))) +svbfloat16_t svmax_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x))) +svbfloat16_t svmax_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z))) +svbfloat16_t svmax_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m))) +svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x))) +svbfloat16_t svmaxnm_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z))) +svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m))) +svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x))) +svbfloat16_t svmaxnm_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z))) +svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m))) +svbfloat16_t svmin_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x))) +svbfloat16_t svmin_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z))) +svbfloat16_t svmin_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m))) +svbfloat16_t svmin_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x))) +svbfloat16_t svmin_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z))) +svbfloat16_t svmin_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m))) +svbfloat16_t svminnm_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x))) +svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z))) +svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m))) +svbfloat16_t svminnm_m(svbool_t, 
svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x))) +svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z))) +svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m))) +svbfloat16_t svmla_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x))) +svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z))) +svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m))) +svbfloat16_t svmla_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x))) +svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z))) +svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16))) +svbfloat16_t svmla_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m))) +svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x))) +svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z))) +svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m))) +svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x))) +svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z))) +svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16))) +svbfloat16_t svmls_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m))) +svbfloat16_t svmul_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x))) +svbfloat16_t svmul_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z))) +svbfloat16_t svmul_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m))) +svbfloat16_t svmul_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x))) +svbfloat16_t svmul_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z))) +svbfloat16_t svmul_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16))) +svbfloat16_t svmul_lane(svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m))) +svbfloat16_t 
svsub_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x))) +svbfloat16_t svsub_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z))) +svbfloat16_t svsub_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m))) +svbfloat16_t svsub_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x))) +svbfloat16_t svsub_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z))) +svbfloat16_t svsub_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16))) +svbfloat16_t svtbl2_bf16(svbfloat16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16))) +svbfloat16_t svtbx_bf16(svbfloat16_t, svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16))) +svbool_t svwhilerw_bf16(bfloat16_t const *, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16))) +svbool_t svwhilewr_bf16(bfloat16_t const *, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16))) +svbfloat16_t svtbl2(svbfloat16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16))) +svbfloat16_t svtbx(svbfloat16_t, svbfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_bf16))) +svbool_t svwhilerw(bfloat16_t const *, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_bf16))) +svbool_t svwhilewr(bfloat16_t const *, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8))) +svuint8_t svaesd_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8))) +svuint8_t svaese_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8))) +svuint8_t svaesimc_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8))) +svuint8_t svaesmc_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64))) +svuint64_t svpmullb_pair_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64))) +svuint64_t svpmullb_pair_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64))) +svuint64_t svpmullt_pair_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64))) +svuint64_t svpmullt_pair_u64(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesd_u8))) +svuint8_t svaesd(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaese_u8))) +svuint8_t svaese(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesimc_u8))) +svuint8_t svaesimc(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaesmc_u8))) +svuint8_t svaesmc(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u64))) +svuint64_t svpmullb_pair(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u64))) +svuint64_t 
svpmullb_pair(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u64))) +svuint64_t svpmullt_pair(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u64))) +svuint64_t svpmullt_pair(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8))) +svuint8_t svbdep_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32))) +svuint32_t svbdep_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64))) +svuint64_t svbdep_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16))) +svuint16_t svbdep_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8))) +svuint8_t svbdep_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32))) +svuint32_t svbdep_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64))) +svuint64_t svbdep_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16))) +svuint16_t svbdep_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8))) +svuint8_t svbext_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32))) +svuint32_t svbext_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64))) +svuint64_t svbext_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16))) +svuint16_t svbext_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8))) +svuint8_t svbext_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32))) +svuint32_t svbext_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64))) +svuint64_t svbext_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16))) +svuint16_t svbext_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8))) +svuint8_t svbgrp_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32))) +svuint32_t svbgrp_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64))) +svuint64_t svbgrp_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16))) +svuint16_t svbgrp_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8))) +svuint8_t svbgrp_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32))) +svuint32_t svbgrp_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64))) +svuint64_t svbgrp_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16))) +svuint16_t svbgrp_u16(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u8))) +svuint8_t svbdep(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u32))) +svuint32_t svbdep(svuint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u64))) +svuint64_t svbdep(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_n_u16))) +svuint16_t svbdep(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u8))) +svuint8_t svbdep(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u32))) +svuint32_t svbdep(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u64))) +svuint64_t svbdep(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbdep_u16))) +svuint16_t svbdep(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u8))) +svuint8_t svbext(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u32))) +svuint32_t svbext(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u64))) +svuint64_t svbext(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_n_u16))) +svuint16_t svbext(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u8))) +svuint8_t svbext(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u32))) +svuint32_t svbext(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u64))) +svuint64_t svbext(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbext_u16))) +svuint16_t svbext(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u8))) +svuint8_t svbgrp(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u32))) +svuint32_t svbgrp(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u64))) +svuint64_t svbgrp(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_n_u16))) +svuint16_t svbgrp(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u8))) +svuint8_t svbgrp(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u32))) +svuint32_t svbgrp(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u64))) +svuint64_t svbgrp(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbgrp_u16))) +svuint16_t svbgrp(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64))) +svuint64_t svrax1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64))) +svint64_t svrax1_s64(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_u64))) +svuint64_t svrax1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrax1_s64))) +svint64_t svrax1(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32))) +svuint32_t svsm4e_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32))) +svuint32_t svsm4ekey_u32(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4e_u32))) +svuint32_t svsm4e(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsm4ekey_u32))) 
+svuint32_t svsm4ekey(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u8))) +uint8x16_t svaddqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u32))) +uint32x4_t svaddqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u64))) +uint64x2_t svaddqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u16))) +uint16x8_t svaddqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s8))) +int8x16_t svaddqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s32))) +int32x4_t svaddqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s64))) +int64x2_t svaddqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s16))) +int16x8_t svaddqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64))) +float64x2_t svaddqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32))) +float32x4_t svaddqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16))) +float16x8_t svaddqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u8))) +uint8x16_t svandqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u32))) +uint32x4_t svandqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u64))) +uint64x2_t svandqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u16))) +uint16x8_t svandqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s8))) +int8x16_t svandqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s32))) +int32x4_t svandqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s64))) +int64x2_t svandqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s16))) +int16x8_t svandqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u8))) +uint8x16_t sveorqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u32))) +uint32x4_t sveorqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u64))) +uint64x2_t sveorqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u16))) +uint16x8_t sveorqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s8))) +int8x16_t sveorqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s32))) +int32x4_t sveorqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s64))) +int64x2_t sveorqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s16))) +int16x8_t sveorqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u8))) +svuint8_t svextq_u8(svuint8_t, svuint8_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u32))) +svuint32_t 
svextq_u32(svuint32_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u64))) +svuint64_t svextq_u64(svuint64_t, svuint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u16))) +svuint16_t svextq_u16(svuint16_t, svuint16_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_bf16))) +svbfloat16_t svextq_bf16(svbfloat16_t, svbfloat16_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s8))) +svint8_t svextq_s8(svint8_t, svint8_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f64))) +svfloat64_t svextq_f64(svfloat64_t, svfloat64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f32))) +svfloat32_t svextq_f32(svfloat32_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f16))) +svfloat16_t svextq_f16(svfloat16_t, svfloat16_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s32))) +svint32_t svextq_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s64))) +svint64_t svextq_s64(svint64_t, svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s16))) +svint16_t svextq_s16(svint16_t, svint16_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u32))) +svuint32_t svld1q_gather_u64base_index_u32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u64))) +svuint64_t svld1q_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u16))) +svuint16_t svld1q_gather_u64base_index_u16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_bf16))) +svbfloat16_t svld1q_gather_u64base_index_bf16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f64))) +svfloat64_t svld1q_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f32))) +svfloat32_t svld1q_gather_u64base_index_f32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f16))) +svfloat16_t svld1q_gather_u64base_index_f16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s32))) +svint32_t svld1q_gather_u64base_index_s32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s64))) +svint64_t svld1q_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s16))) +svint16_t svld1q_gather_u64base_index_s16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u8))) +svuint8_t svld1q_gather_u64base_offset_u8(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u32))) +svuint32_t svld1q_gather_u64base_offset_u32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u64))) +svuint64_t 
svld1q_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u16))) +svuint16_t svld1q_gather_u64base_offset_u16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_bf16))) +svbfloat16_t svld1q_gather_u64base_offset_bf16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s8))) +svint8_t svld1q_gather_u64base_offset_s8(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f64))) +svfloat64_t svld1q_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f32))) +svfloat32_t svld1q_gather_u64base_offset_f32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f16))) +svfloat16_t svld1q_gather_u64base_offset_f16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s32))) +svint32_t svld1q_gather_u64base_offset_s32(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s64))) +svint64_t svld1q_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s16))) +svint16_t svld1q_gather_u64base_offset_s16(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u8))) +svuint8_t svld1q_gather_u64base_u8(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u32))) +svuint32_t svld1q_gather_u64base_u32(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u64))) +svuint64_t svld1q_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u16))) +svuint16_t svld1q_gather_u64base_u16(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_bf16))) +svbfloat16_t svld1q_gather_u64base_bf16(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s8))) +svint8_t svld1q_gather_u64base_s8(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f64))) +svfloat64_t svld1q_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f32))) +svfloat32_t svld1q_gather_u64base_f32(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f16))) +svfloat16_t svld1q_gather_u64base_f16(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s32))) +svint32_t svld1q_gather_u64base_s32(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s64))) +svint64_t svld1q_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s16))) +svint16_t svld1q_gather_u64base_s16(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u32))) +svuint32_t 
svld1q_gather_u64index_u32(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u64))) +svuint64_t svld1q_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u16))) +svuint16_t svld1q_gather_u64index_u16(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_bf16))) +svbfloat16_t svld1q_gather_u64index_bf16(svbool_t, bfloat16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f64))) +svfloat64_t svld1q_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f32))) +svfloat32_t svld1q_gather_u64index_f32(svbool_t, float32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f16))) +svfloat16_t svld1q_gather_u64index_f16(svbool_t, float16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s32))) +svint32_t svld1q_gather_u64index_s32(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s64))) +svint64_t svld1q_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s16))) +svint16_t svld1q_gather_u64index_s16(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u8))) +svuint8_t svld1q_gather_u64offset_u8(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u32))) +svuint32_t svld1q_gather_u64offset_u32(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u64))) +svuint64_t svld1q_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u16))) +svuint16_t svld1q_gather_u64offset_u16(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_bf16))) +svbfloat16_t svld1q_gather_u64offset_bf16(svbool_t, bfloat16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s8))) +svint8_t svld1q_gather_u64offset_s8(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f64))) +svfloat64_t svld1q_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f32))) +svfloat32_t svld1q_gather_u64offset_f32(svbool_t, float32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f16))) +svfloat16_t svld1q_gather_u64offset_f16(svbool_t, float16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s32))) +svint32_t svld1q_gather_u64offset_s32(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s64))) +svint64_t svld1q_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s16))) +svint16_t svld1q_gather_u64offset_s16(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_u64))) +svuint64_t svld1udq_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_f64))) +svfloat64_t svld1udq_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_s64))) +svint64_t svld1udq_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_u64))) +svuint64_t svld1udq_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_f64))) +svfloat64_t svld1udq_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_s64))) +svint64_t svld1udq_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_u32))) +svuint32_t svld1uwq_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_f32))) +svfloat32_t svld1uwq_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_s32))) +svint32_t svld1uwq_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_u32))) +svuint32_t svld1uwq_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_f32))) +svfloat32_t svld1uwq_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_s32))) +svint32_t svld1uwq_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u8))) +svuint8x2_t svld2q_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u32))) +svuint32x2_t svld2q_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u64))) +svuint64x2_t svld2q_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u16))) +svuint16x2_t svld2q_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s8))) +svint8x2_t svld2q_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f64))) +svfloat64x2_t svld2q_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f32))) +svfloat32x2_t svld2q_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f16))) +svfloat16x2_t svld2q_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s32))) +svint32x2_t svld2q_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s64))) +svint64x2_t svld2q_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s16))) +svint16x2_t svld2q_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_bf16))) +svbfloat16x2_t svld2q_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u8))) +svuint8x2_t svld2q_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u32))) +svuint32x2_t svld2q_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u64))) +svuint64x2_t svld2q_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u16))) +svuint16x2_t svld2q_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s8))) +svint8x2_t svld2q_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f64))) +svfloat64x2_t svld2q_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f32))) +svfloat32x2_t svld2q_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f16))) +svfloat16x2_t svld2q_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s32))) +svint32x2_t svld2q_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s64))) +svint64x2_t svld2q_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s16))) +svint16x2_t svld2q_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_bf16))) +svbfloat16x2_t svld2q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u8))) +svuint8x3_t svld3q_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u32))) +svuint32x3_t svld3q_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u64))) +svuint64x3_t svld3q_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u16))) +svuint16x3_t svld3q_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s8))) +svint8x3_t svld3q_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f64))) +svfloat64x3_t svld3q_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f32))) +svfloat32x3_t svld3q_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f16))) +svfloat16x3_t svld3q_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s32))) +svint32x3_t svld3q_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s64))) +svint64x3_t svld3q_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s16))) +svint16x3_t svld3q_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_bf16))) +svbfloat16x3_t svld3q_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u8))) +svuint8x3_t svld3q_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u32))) +svuint32x3_t svld3q_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u64))) +svuint64x3_t 
svld3q_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u16))) +svuint16x3_t svld3q_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s8))) +svint8x3_t svld3q_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f64))) +svfloat64x3_t svld3q_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f32))) +svfloat32x3_t svld3q_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f16))) +svfloat16x3_t svld3q_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s32))) +svint32x3_t svld3q_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s64))) +svint64x3_t svld3q_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s16))) +svint16x3_t svld3q_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_bf16))) +svbfloat16x3_t svld3q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u8))) +svuint8x4_t svld4q_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u32))) +svuint32x4_t svld4q_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u64))) +svuint64x4_t svld4q_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u16))) +svuint16x4_t svld4q_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s8))) +svint8x4_t svld4q_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f64))) +svfloat64x4_t svld4q_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f32))) +svfloat32x4_t svld4q_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f16))) +svfloat16x4_t svld4q_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s32))) +svint32x4_t svld4q_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s64))) +svint64x4_t svld4q_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s16))) +svint16x4_t svld4q_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_bf16))) +svbfloat16x4_t svld4q_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u8))) +svuint8x4_t svld4q_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u32))) +svuint32x4_t svld4q_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u64))) +svuint64x4_t svld4q_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u16))) +svuint16x4_t svld4q_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s8))) +svint8x4_t svld4q_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f64))) +svfloat64x4_t svld4q_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f32))) +svfloat32x4_t svld4q_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f16))) +svfloat16x4_t svld4q_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s32))) +svint32x4_t svld4q_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s64))) +svint64x4_t svld4q_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s16))) +svint16x4_t svld4q_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_bf16))) +svbfloat16x4_t svld4q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f64))) +float64x2_t svmaxnmqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f32))) +float32x4_t svmaxnmqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f16))) +float16x8_t svmaxnmqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f64))) +float64x2_t svmaxqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f32))) +float32x4_t svmaxqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f16))) +float16x8_t svmaxqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s8))) +int8x16_t svmaxqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s32))) +int32x4_t svmaxqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s64))) +int64x2_t svmaxqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s16))) +int16x8_t svmaxqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u8))) +uint8x16_t svmaxqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u32))) +uint32x4_t svmaxqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u64))) +uint64x2_t svmaxqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u16))) +uint16x8_t svmaxqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f64))) +float64x2_t svminnmqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f32))) +float32x4_t svminnmqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f16))) +float16x8_t svminnmqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f64))) +float64x2_t svminqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f32))) +float32x4_t svminqv_f32(svbool_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f16))) +float16x8_t svminqv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s8))) +int8x16_t svminqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s32))) +int32x4_t svminqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s64))) +int64x2_t svminqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s16))) +int16x8_t svminqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u8))) +uint8x16_t svminqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u32))) +uint32x4_t svminqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u64))) +uint64x2_t svminqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u16))) +uint16x8_t svminqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u8))) +uint8x16_t svorqv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u32))) +uint32x4_t svorqv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u64))) +uint64x2_t svorqv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u16))) +uint16x8_t svorqv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s8))) +int8x16_t svorqv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s32))) +int32x4_t svorqv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s64))) +int64x2_t svorqv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s16))) +int16x8_t svorqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u8))) +svbool_t svpmov_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s8))) +svbool_t svpmov_s8(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u64))) +svbool_t svpmov_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s64))) +svbool_t svpmov_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u16))) +svbool_t svpmov_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s16))) +svbool_t svpmov_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u32))) +svbool_t svpmov_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s32))) +svbool_t svpmov_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u8))) +svbool_t svpmov_lane_u8(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s8))) +svbool_t svpmov_lane_s8(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64))) +svbool_t svpmov_lane_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64))) +svbool_t svpmov_lane_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16))) +svbool_t svpmov_lane_u16(svuint16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16))) +svbool_t svpmov_lane_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32))) +svbool_t svpmov_lane_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32))) +svbool_t svpmov_lane_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64_m))) +svuint64_t svpmov_lane_u64_m(svuint64_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64_m))) +svint64_t svpmov_lane_s64_m(svint64_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16_m))) +svuint16_t svpmov_lane_u16_m(svuint16_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16_m))) +svint16_t svpmov_lane_s16_m(svint16_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32_m))) +svuint32_t svpmov_lane_u32_m(svuint32_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32_m))) +svint32_t svpmov_lane_s32_m(svint32_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u8_z))) +svuint8_t svpmov_u8_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s8_z))) +svint8_t svpmov_s8_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u64_z))) +svuint64_t svpmov_u64_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s64_z))) +svint64_t svpmov_s64_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u16_z))) +svuint16_t svpmov_u16_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s16_z))) +svint16_t svpmov_s16_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u32_z))) +svuint32_t svpmov_u32_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s32_z))) +svint32_t svpmov_s32_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_u64))) +void svst1dq_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_f64))) +void svst1dq_f64(svbool_t, float64_t const *, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_s64))) +void svst1dq_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_u64))) +void svst1dq_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_f64))) +void svst1dq_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_s64))) +void svst1dq_vnum_s64(svbool_t, int64_t const *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u8))) +void svst1q_scatter_u64base_u8(svbool_t, svuint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u32))) +void svst1q_scatter_u64base_u32(svbool_t, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u64))) +void svst1q_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u16))) +void svst1q_scatter_u64base_u16(svbool_t, svuint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_bf16))) +void svst1q_scatter_u64base_bf16(svbool_t, svuint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s8))) +void svst1q_scatter_u64base_s8(svbool_t, svuint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f64))) +void svst1q_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f32))) +void svst1q_scatter_u64base_f32(svbool_t, svuint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f16))) +void svst1q_scatter_u64base_f16(svbool_t, svuint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s32))) +void svst1q_scatter_u64base_s32(svbool_t, svuint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s64))) +void svst1q_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s16))) +void svst1q_scatter_u64base_s16(svbool_t, svuint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u32))) +void svst1q_scatter_u64base_index_u32(svbool_t, svuint64_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u64))) +void svst1q_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u16))) +void svst1q_scatter_u64base_index_u16(svbool_t, svuint64_t, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_bf16))) +void svst1q_scatter_u64base_index_bf16(svbool_t, svuint64_t, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f64))) +void svst1q_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f32))) +void svst1q_scatter_u64base_index_f32(svbool_t, svuint64_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f16))) +void svst1q_scatter_u64base_index_f16(svbool_t, svuint64_t, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s32))) +void svst1q_scatter_u64base_index_s32(svbool_t, svuint64_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s64))) +void svst1q_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s16))) +void svst1q_scatter_u64base_index_s16(svbool_t, svuint64_t, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u8))) +void svst1q_scatter_u64base_offset_u8(svbool_t, svuint64_t, int64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u32))) +void 
svst1q_scatter_u64base_offset_u32(svbool_t, svuint64_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u64))) +void svst1q_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u16))) +void svst1q_scatter_u64base_offset_u16(svbool_t, svuint64_t, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_bf16))) +void svst1q_scatter_u64base_offset_bf16(svbool_t, svuint64_t, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s8))) +void svst1q_scatter_u64base_offset_s8(svbool_t, svuint64_t, int64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f64))) +void svst1q_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f32))) +void svst1q_scatter_u64base_offset_f32(svbool_t, svuint64_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f16))) +void svst1q_scatter_u64base_offset_f16(svbool_t, svuint64_t, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s32))) +void svst1q_scatter_u64base_offset_s32(svbool_t, svuint64_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s64))) +void svst1q_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s16))) +void svst1q_scatter_u64base_offset_s16(svbool_t, svuint64_t, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u32))) +void svst1q_scatter_u64index_u32(svbool_t, uint32_t *, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u64))) +void svst1q_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u16))) +void svst1q_scatter_u64index_u16(svbool_t, uint16_t *, svuint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_bf16))) +void svst1q_scatter_u64index_bf16(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f64))) +void svst1q_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f32))) +void svst1q_scatter_u64index_f32(svbool_t, float32_t *, svuint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f16))) +void svst1q_scatter_u64index_f16(svbool_t, float16_t *, svuint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s32))) +void svst1q_scatter_u64index_s32(svbool_t, int32_t *, svuint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s64))) +void svst1q_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s16))) 
+void svst1q_scatter_u64index_s16(svbool_t, int16_t *, svuint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u8))) +void svst1q_scatter_u64offset_u8(svbool_t, uint8_t *, svuint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u32))) +void svst1q_scatter_u64offset_u32(svbool_t, uint32_t *, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u64))) +void svst1q_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u16))) +void svst1q_scatter_u64offset_u16(svbool_t, uint16_t *, svuint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_bf16))) +void svst1q_scatter_u64offset_bf16(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s8))) +void svst1q_scatter_u64offset_s8(svbool_t, int8_t *, svuint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f64))) +void svst1q_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f32))) +void svst1q_scatter_u64offset_f32(svbool_t, float32_t *, svuint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f16))) +void svst1q_scatter_u64offset_f16(svbool_t, float16_t *, svuint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s32))) +void svst1q_scatter_u64offset_s32(svbool_t, int32_t *, svuint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s64))) +void svst1q_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s16))) +void svst1q_scatter_u64offset_s16(svbool_t, int16_t *, svuint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_u32))) +void svst1wq_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_f32))) +void svst1wq_f32(svbool_t, float32_t const *, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_s32))) +void svst1wq_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_u32))) +void svst1wq_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_f32))) +void svst1wq_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_s32))) +void svst1wq_vnum_s32(svbool_t, int32_t const *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u8))) +void svst2q_u8(svbool_t, uint8_t const *, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u32))) +void svst2q_u32(svbool_t, uint32_t const *, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u64))) +void svst2q_u64(svbool_t, uint64_t const *, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u16))) +void svst2q_u16(svbool_t, uint16_t 
const *, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s8))) +void svst2q_s8(svbool_t, int8_t const *, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f64))) +void svst2q_f64(svbool_t, float64_t const *, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f32))) +void svst2q_f32(svbool_t, float32_t const *, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f16))) +void svst2q_f16(svbool_t, float16_t const *, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s32))) +void svst2q_s32(svbool_t, int32_t const *, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s64))) +void svst2q_s64(svbool_t, int64_t const *, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s16))) +void svst2q_s16(svbool_t, int16_t const *, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_bf16))) +void svst2q_bf16(svbool_t, bfloat16_t const *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u8))) +void svst2q_vnum_u8(svbool_t, uint8_t const *, int64_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u32))) +void svst2q_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u64))) +void svst2q_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u16))) +void svst2q_vnum_u16(svbool_t, uint16_t const *, int64_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s8))) +void svst2q_vnum_s8(svbool_t, int8_t const *, int64_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f64))) +void svst2q_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f32))) +void svst2q_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f16))) +void svst2q_vnum_f16(svbool_t, float16_t const *, int64_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s32))) +void svst2q_vnum_s32(svbool_t, int32_t const *, int64_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s64))) +void svst2q_vnum_s64(svbool_t, int64_t const *, int64_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s16))) +void svst2q_vnum_s16(svbool_t, int16_t const *, int64_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_bf16))) +void svst2q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u8))) +void svst3q_u8(svbool_t, uint8_t const *, svuint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u32))) +void svst3q_u32(svbool_t, uint32_t const *, svuint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u64))) +void svst3q_u64(svbool_t, uint64_t const *, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u16))) +void svst3q_u16(svbool_t, uint16_t const *, svuint16x3_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s8))) +void svst3q_s8(svbool_t, int8_t const *, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f64))) +void svst3q_f64(svbool_t, float64_t const *, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f32))) +void svst3q_f32(svbool_t, float32_t const *, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f16))) +void svst3q_f16(svbool_t, float16_t const *, svfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s32))) +void svst3q_s32(svbool_t, int32_t const *, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s64))) +void svst3q_s64(svbool_t, int64_t const *, svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s16))) +void svst3q_s16(svbool_t, int16_t const *, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_bf16))) +void svst3q_bf16(svbool_t, bfloat16_t const *, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u8))) +void svst3q_vnum_u8(svbool_t, uint8_t const *, int64_t, svuint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u32))) +void svst3q_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u64))) +void svst3q_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u16))) +void svst3q_vnum_u16(svbool_t, uint16_t const *, int64_t, svuint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s8))) +void svst3q_vnum_s8(svbool_t, int8_t const *, int64_t, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f64))) +void svst3q_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f32))) +void svst3q_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f16))) +void svst3q_vnum_f16(svbool_t, float16_t const *, int64_t, svfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s32))) +void svst3q_vnum_s32(svbool_t, int32_t const *, int64_t, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s64))) +void svst3q_vnum_s64(svbool_t, int64_t const *, int64_t, svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s16))) +void svst3q_vnum_s16(svbool_t, int16_t const *, int64_t, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_bf16))) +void svst3q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u8))) +void svst4q_u8(svbool_t, uint8_t const *, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u32))) +void svst4q_u32(svbool_t, uint32_t const *, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u64))) +void svst4q_u64(svbool_t, uint64_t const *, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u16))) +void svst4q_u16(svbool_t, uint16_t const *, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s8))) +void svst4q_s8(svbool_t, 
int8_t const *, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f64))) +void svst4q_f64(svbool_t, float64_t const *, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f32))) +void svst4q_f32(svbool_t, float32_t const *, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f16))) +void svst4q_f16(svbool_t, float16_t const *, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s32))) +void svst4q_s32(svbool_t, int32_t const *, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s64))) +void svst4q_s64(svbool_t, int64_t const *, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s16))) +void svst4q_s16(svbool_t, int16_t const *, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_bf16))) +void svst4q_bf16(svbool_t, bfloat16_t const *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u8))) +void svst4q_vnum_u8(svbool_t, uint8_t const *, int64_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u32))) +void svst4q_vnum_u32(svbool_t, uint32_t const *, int64_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u64))) +void svst4q_vnum_u64(svbool_t, uint64_t const *, int64_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u16))) +void svst4q_vnum_u16(svbool_t, uint16_t const *, int64_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s8))) +void svst4q_vnum_s8(svbool_t, int8_t const *, int64_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f64))) +void svst4q_vnum_f64(svbool_t, float64_t const *, int64_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f32))) +void svst4q_vnum_f32(svbool_t, float32_t const *, int64_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f16))) +void svst4q_vnum_f16(svbool_t, float16_t const *, int64_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s32))) +void svst4q_vnum_s32(svbool_t, int32_t const *, int64_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s64))) +void svst4q_vnum_s64(svbool_t, int64_t const *, int64_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s16))) +void svst4q_vnum_s16(svbool_t, int16_t const *, int64_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_bf16))) +void svst4q_vnum_bf16(svbool_t, bfloat16_t const *, int64_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u8))) +svuint8_t svtblq_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u32))) +svuint32_t svtblq_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u64))) +svuint64_t svtblq_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u16))) +svuint16_t svtblq_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_bf16))) +svbfloat16_t svtblq_bf16(svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s8))) +svint8_t svtblq_s8(svint8_t, 
svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f64))) +svfloat64_t svtblq_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f32))) +svfloat32_t svtblq_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f16))) +svfloat16_t svtblq_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s32))) +svint32_t svtblq_s32(svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s64))) +svint64_t svtblq_s64(svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s16))) +svint16_t svtblq_s16(svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u8))) +svuint8_t svtbxq_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u32))) +svuint32_t svtbxq_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u64))) +svuint64_t svtbxq_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u16))) +svuint16_t svtbxq_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_bf16))) +svbfloat16_t svtbxq_bf16(svbfloat16_t, svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s8))) +svint8_t svtbxq_s8(svint8_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f64))) +svfloat64_t svtbxq_f64(svfloat64_t, svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f32))) +svfloat32_t svtbxq_f32(svfloat32_t, svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f16))) +svfloat16_t svtbxq_f16(svfloat16_t, svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s32))) +svint32_t svtbxq_s32(svint32_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s64))) +svint64_t svtbxq_s64(svint64_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s16))) +svint16_t svtbxq_s16(svint16_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u8))) +svuint8_t svuzpq1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u32))) +svuint32_t svuzpq1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u64))) +svuint64_t svuzpq1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u16))) +svuint16_t svuzpq1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_bf16))) +svbfloat16_t svuzpq1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s8))) +svint8_t svuzpq1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f64))) +svfloat64_t svuzpq1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f32))) +svfloat32_t svuzpq1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f16))) +svfloat16_t svuzpq1_f16(svfloat16_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s32))) +svint32_t svuzpq1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s64))) +svint64_t svuzpq1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s16))) +svint16_t svuzpq1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u8))) +svuint8_t svuzpq2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u32))) +svuint32_t svuzpq2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u64))) +svuint64_t svuzpq2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u16))) +svuint16_t svuzpq2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_bf16))) +svbfloat16_t svuzpq2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s8))) +svint8_t svuzpq2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f64))) +svfloat64_t svuzpq2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f32))) +svfloat32_t svuzpq2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f16))) +svfloat16_t svuzpq2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s32))) +svint32_t svuzpq2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s64))) +svint64_t svuzpq2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s16))) +svint16_t svuzpq2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u8))) +svuint8_t svzipq1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u32))) +svuint32_t svzipq1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u64))) +svuint64_t svzipq1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u16))) +svuint16_t svzipq1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_bf16))) +svbfloat16_t svzipq1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s8))) +svint8_t svzipq1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f64))) +svfloat64_t svzipq1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f32))) +svfloat32_t svzipq1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f16))) +svfloat16_t svzipq1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s32))) +svint32_t svzipq1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s64))) +svint64_t svzipq1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s16))) +svint16_t svzipq1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u8))) +svuint8_t svzipq2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u32))) 
+svuint32_t svzipq2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u64))) +svuint64_t svzipq2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u16))) +svuint16_t svzipq2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_bf16))) +svbfloat16_t svzipq2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s8))) +svint8_t svzipq2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f64))) +svfloat64_t svzipq2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f32))) +svfloat32_t svzipq2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f16))) +svfloat16_t svzipq2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s32))) +svint32_t svzipq2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s64))) +svint64_t svzipq2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s16))) +svint16_t svzipq2_s16(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u8))) +uint8x16_t svaddqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u32))) +uint32x4_t svaddqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u64))) +uint64x2_t svaddqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u16))) +uint16x8_t svaddqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s8))) +int8x16_t svaddqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s32))) +int32x4_t svaddqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s64))) +int64x2_t svaddqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s16))) +int16x8_t svaddqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64))) +float64x2_t svaddqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32))) +float32x4_t svaddqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16))) +float16x8_t svaddqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u8))) +uint8x16_t svandqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u32))) +uint32x4_t svandqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u64))) +uint64x2_t svandqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u16))) +uint16x8_t svandqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s8))) +int8x16_t svandqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s32))) +int32x4_t svandqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s64))) +int64x2_t svandqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_s16))) +int16x8_t svandqv(svbool_t, 
svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u8))) +uint8x16_t sveorqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u32))) +uint32x4_t sveorqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u64))) +uint64x2_t sveorqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_u16))) +uint16x8_t sveorqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s8))) +int8x16_t sveorqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s32))) +int32x4_t sveorqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s64))) +int64x2_t sveorqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorqv_s16))) +int16x8_t sveorqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u8))) +svuint8_t svextq(svuint8_t, svuint8_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u32))) +svuint32_t svextq(svuint32_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u64))) +svuint64_t svextq(svuint64_t, svuint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_u16))) +svuint16_t svextq(svuint16_t, svuint16_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_bf16))) +svbfloat16_t svextq(svbfloat16_t, svbfloat16_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s8))) +svint8_t svextq(svint8_t, svint8_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f64))) +svfloat64_t svextq(svfloat64_t, svfloat64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f32))) +svfloat32_t svextq(svfloat32_t, svfloat32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_f16))) +svfloat16_t svextq(svfloat16_t, svfloat16_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s32))) +svint32_t svextq(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s64))) +svint64_t svextq(svint64_t, svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextq_s16))) +svint16_t svextq(svint16_t, svint16_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u32))) +svuint32_t svld1q_gather_index_u32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u64))) +svuint64_t svld1q_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_u16))) +svuint16_t svld1q_gather_index_u16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_bf16))) +svbfloat16_t svld1q_gather_index_bf16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f64))) +svfloat64_t svld1q_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f32))) +svfloat32_t svld1q_gather_index_f32(svbool_t, svuint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_f16))) +svfloat16_t svld1q_gather_index_f16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s32))) +svint32_t svld1q_gather_index_s32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s64))) +svint64_t svld1q_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_index_s16))) +svint16_t svld1q_gather_index_s16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u8))) +svuint8_t svld1q_gather_offset_u8(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u32))) +svuint32_t svld1q_gather_offset_u32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u64))) +svuint64_t svld1q_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_u16))) +svuint16_t svld1q_gather_offset_u16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_bf16))) +svbfloat16_t svld1q_gather_offset_bf16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s8))) +svint8_t svld1q_gather_offset_s8(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f64))) +svfloat64_t svld1q_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f32))) +svfloat32_t svld1q_gather_offset_f32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_f16))) +svfloat16_t svld1q_gather_offset_f16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s32))) +svint32_t svld1q_gather_offset_s32(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s64))) +svint64_t svld1q_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_offset_s16))) +svint16_t svld1q_gather_offset_s16(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u8))) +svuint8_t svld1q_gather_u8(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u32))) +svuint32_t svld1q_gather_u32(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u64))) +svuint64_t svld1q_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_u16))) +svuint16_t svld1q_gather_u16(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_bf16))) +svbfloat16_t svld1q_gather_bf16(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s8))) +svint8_t svld1q_gather_s8(svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f64))) +svfloat64_t svld1q_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f32))) +svfloat32_t svld1q_gather_f32(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_f16))) +svfloat16_t svld1q_gather_f16(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s32))) +svint32_t svld1q_gather_s32(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s64))) +svint64_t svld1q_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64base_s16))) +svint16_t svld1q_gather_s16(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u32))) +svuint32_t svld1q_gather_index(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u64))) +svuint64_t svld1q_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_u16))) +svuint16_t svld1q_gather_index(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_bf16))) +svbfloat16_t svld1q_gather_index(svbool_t, bfloat16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f64))) +svfloat64_t svld1q_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f32))) +svfloat32_t svld1q_gather_index(svbool_t, float32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_f16))) +svfloat16_t svld1q_gather_index(svbool_t, float16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s32))) +svint32_t svld1q_gather_index(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s64))) +svint64_t svld1q_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64index_s16))) +svint16_t svld1q_gather_index(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u8))) +svuint8_t svld1q_gather_offset(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u32))) +svuint32_t svld1q_gather_offset(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u64))) +svuint64_t svld1q_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_u16))) +svuint16_t svld1q_gather_offset(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_bf16))) +svbfloat16_t svld1q_gather_offset(svbool_t, bfloat16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s8))) +svint8_t svld1q_gather_offset(svbool_t, int8_t const *, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f64))) +svfloat64_t svld1q_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f32))) +svfloat32_t svld1q_gather_offset(svbool_t, float32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_f16))) +svfloat16_t svld1q_gather_offset(svbool_t, float16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s32))) +svint32_t svld1q_gather_offset(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s64))) +svint64_t svld1q_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1q_gather_u64offset_s16))) +svint16_t svld1q_gather_offset(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_u64))) +svuint64_t svld1udq(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_f64))) +svfloat64_t svld1udq(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_s64))) +svint64_t svld1udq(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_u64))) +svuint64_t svld1udq_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_f64))) +svfloat64_t svld1udq_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1udq_vnum_s64))) +svint64_t svld1udq_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_u32))) +svuint32_t svld1uwq(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_f32))) +svfloat32_t svld1uwq(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_s32))) +svint32_t svld1uwq(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_u32))) +svuint32_t svld1uwq_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_f32))) +svfloat32_t svld1uwq_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uwq_vnum_s32))) +svint32_t svld1uwq_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u8))) +svuint8x2_t svld2q(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u32))) +svuint32x2_t svld2q(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u64))) +svuint64x2_t svld2q(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_u16))) +svuint16x2_t svld2q(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s8))) +svint8x2_t svld2q(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f64))) +svfloat64x2_t svld2q(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f32))) +svfloat32x2_t svld2q(svbool_t, float32_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_f16))) +svfloat16x2_t svld2q(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s32))) +svint32x2_t svld2q(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s64))) +svint64x2_t svld2q(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_s16))) +svint16x2_t svld2q(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_bf16))) +svbfloat16x2_t svld2q(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u8))) +svuint8x2_t svld2q_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u32))) +svuint32x2_t svld2q_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u64))) +svuint64x2_t svld2q_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_u16))) +svuint16x2_t svld2q_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s8))) +svint8x2_t svld2q_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f64))) +svfloat64x2_t svld2q_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f32))) +svfloat32x2_t svld2q_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_f16))) +svfloat16x2_t svld2q_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s32))) +svint32x2_t svld2q_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s64))) +svint64x2_t svld2q_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_s16))) +svint16x2_t svld2q_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2q_vnum_bf16))) +svbfloat16x2_t svld2q_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u8))) +svuint8x3_t svld3q(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u32))) +svuint32x3_t svld3q(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u64))) +svuint64x3_t svld3q(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_u16))) +svuint16x3_t svld3q(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s8))) +svint8x3_t svld3q(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f64))) +svfloat64x3_t svld3q(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f32))) +svfloat32x3_t svld3q(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_f16))) +svfloat16x3_t svld3q(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s32))) +svint32x3_t svld3q(svbool_t, int32_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s64))) +svint64x3_t svld3q(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_s16))) +svint16x3_t svld3q(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_bf16))) +svbfloat16x3_t svld3q(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u8))) +svuint8x3_t svld3q_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u32))) +svuint32x3_t svld3q_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u64))) +svuint64x3_t svld3q_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_u16))) +svuint16x3_t svld3q_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s8))) +svint8x3_t svld3q_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f64))) +svfloat64x3_t svld3q_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f32))) +svfloat32x3_t svld3q_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_f16))) +svfloat16x3_t svld3q_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s32))) +svint32x3_t svld3q_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s64))) +svint64x3_t svld3q_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_s16))) +svint16x3_t svld3q_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3q_vnum_bf16))) +svbfloat16x3_t svld3q_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u8))) +svuint8x4_t svld4q(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u32))) +svuint32x4_t svld4q(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u64))) +svuint64x4_t svld4q(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_u16))) +svuint16x4_t svld4q(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s8))) +svint8x4_t svld4q(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f64))) +svfloat64x4_t svld4q(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f32))) +svfloat32x4_t svld4q(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_f16))) +svfloat16x4_t svld4q(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s32))) +svint32x4_t svld4q(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s64))) +svint64x4_t svld4q(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_s16))) +svint16x4_t svld4q(svbool_t, int16_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_bf16))) +svbfloat16x4_t svld4q(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u8))) +svuint8x4_t svld4q_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u32))) +svuint32x4_t svld4q_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u64))) +svuint64x4_t svld4q_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_u16))) +svuint16x4_t svld4q_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s8))) +svint8x4_t svld4q_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f64))) +svfloat64x4_t svld4q_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f32))) +svfloat32x4_t svld4q_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_f16))) +svfloat16x4_t svld4q_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s32))) +svint32x4_t svld4q_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s64))) +svint64x4_t svld4q_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_s16))) +svint16x4_t svld4q_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4q_vnum_bf16))) +svbfloat16x4_t svld4q_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f64))) +float64x2_t svmaxnmqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f32))) +float32x4_t svmaxnmqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmqv_f16))) +float16x8_t svmaxnmqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f64))) +float64x2_t svmaxqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f32))) +float32x4_t svmaxqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_f16))) +float16x8_t svmaxqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s8))) +int8x16_t svmaxqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s32))) +int32x4_t svmaxqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s64))) +int64x2_t svmaxqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_s16))) +int16x8_t svmaxqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u8))) +uint8x16_t svmaxqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u32))) +uint32x4_t svmaxqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u64))) +uint64x2_t svmaxqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxqv_u16))) +uint16x8_t svmaxqv(svbool_t, svuint16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f64))) +float64x2_t svminnmqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f32))) +float32x4_t svminnmqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmqv_f16))) +float16x8_t svminnmqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f64))) +float64x2_t svminqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f32))) +float32x4_t svminqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_f16))) +float16x8_t svminqv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s8))) +int8x16_t svminqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s32))) +int32x4_t svminqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s64))) +int64x2_t svminqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_s16))) +int16x8_t svminqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u8))) +uint8x16_t svminqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u32))) +uint32x4_t svminqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u64))) +uint64x2_t svminqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminqv_u16))) +uint16x8_t svminqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u8))) +uint8x16_t svorqv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u32))) +uint32x4_t svorqv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u64))) +uint64x2_t svorqv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_u16))) +uint16x8_t svorqv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s8))) +int8x16_t svorqv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s32))) +int32x4_t svorqv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s64))) +int64x2_t svorqv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorqv_s16))) +int16x8_t svorqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u8))) +svbool_t svpmov(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s8))) +svbool_t svpmov(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u64))) +svbool_t svpmov(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s64))) +svbool_t svpmov(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u16))) +svbool_t svpmov(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s16))) +svbool_t svpmov(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_u32))) +svbool_t svpmov(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_s32))) +svbool_t svpmov(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u8))) +svbool_t 
svpmov_lane(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s8))) +svbool_t svpmov_lane(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64))) +svbool_t svpmov_lane(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64))) +svbool_t svpmov_lane(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16))) +svbool_t svpmov_lane(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16))) +svbool_t svpmov_lane(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32))) +svbool_t svpmov_lane(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32))) +svbool_t svpmov_lane(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u64_m))) +svuint64_t svpmov_lane_m(svuint64_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s64_m))) +svint64_t svpmov_lane_m(svint64_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u16_m))) +svuint16_t svpmov_lane_m(svuint16_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s16_m))) +svint16_t svpmov_lane_m(svint16_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_u32_m))) +svuint32_t svpmov_lane_m(svuint32_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmov_lane_s32_m))) +svint32_t svpmov_lane_m(svint32_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_u64))) +void svst1dq(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_f64))) +void svst1dq(svbool_t, float64_t const *, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_s64))) +void svst1dq(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_u64))) +void svst1dq_vnum(svbool_t, uint64_t const *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_f64))) +void svst1dq_vnum(svbool_t, float64_t const *, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1dq_vnum_s64))) +void svst1dq_vnum(svbool_t, int64_t const *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u8))) +void svst1q_scatter(svbool_t, svuint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u32))) +void svst1q_scatter(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u64))) +void svst1q_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_u16))) +void svst1q_scatter(svbool_t, svuint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_bf16))) +void svst1q_scatter(svbool_t, svuint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s8))) +void svst1q_scatter(svbool_t, svuint64_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f64))) +void svst1q_scatter(svbool_t, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f32))) +void svst1q_scatter(svbool_t, svuint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_f16))) +void svst1q_scatter(svbool_t, svuint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s32))) +void svst1q_scatter(svbool_t, svuint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s64))) +void svst1q_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_s16))) +void svst1q_scatter(svbool_t, svuint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u32))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u64))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_u16))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_bf16))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f64))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f32))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_f16))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s32))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s64))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_index_s16))) +void svst1q_scatter_index(svbool_t, svuint64_t, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u8))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u32))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u64))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_u16))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_bf16))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s8))) +void 
svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f64))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f32))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_f16))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s32))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s64))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64base_offset_s16))) +void svst1q_scatter_offset(svbool_t, svuint64_t, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u32))) +void svst1q_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u64))) +void svst1q_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_u16))) +void svst1q_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_bf16))) +void svst1q_scatter_index(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f64))) +void svst1q_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f32))) +void svst1q_scatter_index(svbool_t, float32_t *, svuint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_f16))) +void svst1q_scatter_index(svbool_t, float16_t *, svuint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s32))) +void svst1q_scatter_index(svbool_t, int32_t *, svuint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s64))) +void svst1q_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64index_s16))) +void svst1q_scatter_index(svbool_t, int16_t *, svuint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u8))) +void svst1q_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u32))) +void svst1q_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u64))) +void svst1q_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_u16))) +void svst1q_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_bf16))) +void 
svst1q_scatter_offset(svbool_t, bfloat16_t *, svuint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s8))) +void svst1q_scatter_offset(svbool_t, int8_t *, svuint64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f64))) +void svst1q_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f32))) +void svst1q_scatter_offset(svbool_t, float32_t *, svuint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_f16))) +void svst1q_scatter_offset(svbool_t, float16_t *, svuint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s32))) +void svst1q_scatter_offset(svbool_t, int32_t *, svuint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s64))) +void svst1q_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1q_scatter_u64offset_s16))) +void svst1q_scatter_offset(svbool_t, int16_t *, svuint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_u32))) +void svst1wq(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_f32))) +void svst1wq(svbool_t, float32_t const *, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_s32))) +void svst1wq(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_u32))) +void svst1wq_vnum(svbool_t, uint32_t const *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_f32))) +void svst1wq_vnum(svbool_t, float32_t const *, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1wq_vnum_s32))) +void svst1wq_vnum(svbool_t, int32_t const *, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u8))) +void svst2q(svbool_t, uint8_t const *, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u32))) +void svst2q(svbool_t, uint32_t const *, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u64))) +void svst2q(svbool_t, uint64_t const *, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_u16))) +void svst2q(svbool_t, uint16_t const *, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s8))) +void svst2q(svbool_t, int8_t const *, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f64))) +void svst2q(svbool_t, float64_t const *, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f32))) +void svst2q(svbool_t, float32_t const *, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_f16))) +void svst2q(svbool_t, float16_t const *, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s32))) +void svst2q(svbool_t, int32_t const *, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s64))) +void svst2q(svbool_t, int64_t const *, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_s16))) +void svst2q(svbool_t, int16_t const *, svint16x2_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_bf16))) +void svst2q(svbool_t, bfloat16_t const *, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u8))) +void svst2q_vnum(svbool_t, uint8_t const *, int64_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u32))) +void svst2q_vnum(svbool_t, uint32_t const *, int64_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u64))) +void svst2q_vnum(svbool_t, uint64_t const *, int64_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_u16))) +void svst2q_vnum(svbool_t, uint16_t const *, int64_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s8))) +void svst2q_vnum(svbool_t, int8_t const *, int64_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f64))) +void svst2q_vnum(svbool_t, float64_t const *, int64_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f32))) +void svst2q_vnum(svbool_t, float32_t const *, int64_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_f16))) +void svst2q_vnum(svbool_t, float16_t const *, int64_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s32))) +void svst2q_vnum(svbool_t, int32_t const *, int64_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s64))) +void svst2q_vnum(svbool_t, int64_t const *, int64_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_s16))) +void svst2q_vnum(svbool_t, int16_t const *, int64_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2q_vnum_bf16))) +void svst2q_vnum(svbool_t, bfloat16_t const *, int64_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u8))) +void svst3q(svbool_t, uint8_t const *, svuint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u32))) +void svst3q(svbool_t, uint32_t const *, svuint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u64))) +void svst3q(svbool_t, uint64_t const *, svuint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_u16))) +void svst3q(svbool_t, uint16_t const *, svuint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s8))) +void svst3q(svbool_t, int8_t const *, svint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f64))) +void svst3q(svbool_t, float64_t const *, svfloat64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f32))) +void svst3q(svbool_t, float32_t const *, svfloat32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_f16))) +void svst3q(svbool_t, float16_t const *, svfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s32))) +void svst3q(svbool_t, int32_t const *, svint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s64))) +void svst3q(svbool_t, int64_t const *, svint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_s16))) +void svst3q(svbool_t, int16_t const *, svint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_bf16))) +void svst3q(svbool_t, bfloat16_t const *, svbfloat16x3_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u8))) +void svst3q_vnum(svbool_t, uint8_t const *, int64_t, svuint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u32))) +void svst3q_vnum(svbool_t, uint32_t const *, int64_t, svuint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u64))) +void svst3q_vnum(svbool_t, uint64_t const *, int64_t, svuint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_u16))) +void svst3q_vnum(svbool_t, uint16_t const *, int64_t, svuint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s8))) +void svst3q_vnum(svbool_t, int8_t const *, int64_t, svint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f64))) +void svst3q_vnum(svbool_t, float64_t const *, int64_t, svfloat64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f32))) +void svst3q_vnum(svbool_t, float32_t const *, int64_t, svfloat32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_f16))) +void svst3q_vnum(svbool_t, float16_t const *, int64_t, svfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s32))) +void svst3q_vnum(svbool_t, int32_t const *, int64_t, svint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s64))) +void svst3q_vnum(svbool_t, int64_t const *, int64_t, svint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_s16))) +void svst3q_vnum(svbool_t, int16_t const *, int64_t, svint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3q_vnum_bf16))) +void svst3q_vnum(svbool_t, bfloat16_t const *, int64_t, svbfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u8))) +void svst4q(svbool_t, uint8_t const *, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u32))) +void svst4q(svbool_t, uint32_t const *, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u64))) +void svst4q(svbool_t, uint64_t const *, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_u16))) +void svst4q(svbool_t, uint16_t const *, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s8))) +void svst4q(svbool_t, int8_t const *, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f64))) +void svst4q(svbool_t, float64_t const *, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f32))) +void svst4q(svbool_t, float32_t const *, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_f16))) +void svst4q(svbool_t, float16_t const *, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s32))) +void svst4q(svbool_t, int32_t const *, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s64))) +void svst4q(svbool_t, int64_t const *, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_s16))) +void svst4q(svbool_t, int16_t const *, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_bf16))) +void svst4q(svbool_t, bfloat16_t const *, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u8))) +void svst4q_vnum(svbool_t, uint8_t const *, int64_t, svuint8x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u32))) +void svst4q_vnum(svbool_t, uint32_t const *, int64_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u64))) +void svst4q_vnum(svbool_t, uint64_t const *, int64_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_u16))) +void svst4q_vnum(svbool_t, uint16_t const *, int64_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s8))) +void svst4q_vnum(svbool_t, int8_t const *, int64_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f64))) +void svst4q_vnum(svbool_t, float64_t const *, int64_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f32))) +void svst4q_vnum(svbool_t, float32_t const *, int64_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_f16))) +void svst4q_vnum(svbool_t, float16_t const *, int64_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s32))) +void svst4q_vnum(svbool_t, int32_t const *, int64_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s64))) +void svst4q_vnum(svbool_t, int64_t const *, int64_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_s16))) +void svst4q_vnum(svbool_t, int16_t const *, int64_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4q_vnum_bf16))) +void svst4q_vnum(svbool_t, bfloat16_t const *, int64_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u8))) +svuint8_t svtblq(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u32))) +svuint32_t svtblq(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u64))) +svuint64_t svtblq(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_u16))) +svuint16_t svtblq(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_bf16))) +svbfloat16_t svtblq(svbfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s8))) +svint8_t svtblq(svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f64))) +svfloat64_t svtblq(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f32))) +svfloat32_t svtblq(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_f16))) +svfloat16_t svtblq(svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s32))) +svint32_t svtblq(svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s64))) +svint64_t svtblq(svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtblq_s16))) +svint16_t svtblq(svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u8))) +svuint8_t svtbxq(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u32))) +svuint32_t svtbxq(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u64))) +svuint64_t svtbxq(svuint64_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_u16))) +svuint16_t svtbxq(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_bf16))) +svbfloat16_t svtbxq(svbfloat16_t, svbfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s8))) +svint8_t svtbxq(svint8_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f64))) +svfloat64_t svtbxq(svfloat64_t, svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f32))) +svfloat32_t svtbxq(svfloat32_t, svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_f16))) +svfloat16_t svtbxq(svfloat16_t, svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s32))) +svint32_t svtbxq(svint32_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s64))) +svint64_t svtbxq(svint64_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbxq_s16))) +svint16_t svtbxq(svint16_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u8))) +svuint8_t svuzpq1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u32))) +svuint32_t svuzpq1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u64))) +svuint64_t svuzpq1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_u16))) +svuint16_t svuzpq1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_bf16))) +svbfloat16_t svuzpq1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s8))) +svint8_t svuzpq1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f64))) +svfloat64_t svuzpq1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f32))) +svfloat32_t svuzpq1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_f16))) +svfloat16_t svuzpq1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s32))) +svint32_t svuzpq1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s64))) +svint64_t svuzpq1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq1_s16))) +svint16_t svuzpq1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u8))) +svuint8_t svuzpq2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u32))) +svuint32_t svuzpq2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u64))) +svuint64_t svuzpq2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_u16))) +svuint16_t svuzpq2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_bf16))) +svbfloat16_t svuzpq2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s8))) +svint8_t svuzpq2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f64))) +svfloat64_t svuzpq2(svfloat64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f32))) +svfloat32_t svuzpq2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_f16))) +svfloat16_t svuzpq2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s32))) +svint32_t svuzpq2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s64))) +svint64_t svuzpq2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq2_s16))) +svint16_t svuzpq2(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u8))) +svuint8_t svzipq1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u32))) +svuint32_t svzipq1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u64))) +svuint64_t svzipq1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_u16))) +svuint16_t svzipq1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_bf16))) +svbfloat16_t svzipq1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s8))) +svint8_t svzipq1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f64))) +svfloat64_t svzipq1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f32))) +svfloat32_t svzipq1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_f16))) +svfloat16_t svzipq1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s32))) +svint32_t svzipq1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s64))) +svint64_t svzipq1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq1_s16))) +svint16_t svzipq1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u8))) +svuint8_t svzipq2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u32))) +svuint32_t svzipq2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u64))) +svuint64_t svzipq2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_u16))) +svuint16_t svzipq2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_bf16))) +svbfloat16_t svzipq2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s8))) +svint8_t svzipq2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f64))) +svfloat64_t svzipq2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f32))) +svfloat32_t svzipq2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_f16))) +svfloat16_t svzipq2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s32))) +svint32_t svzipq2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s64))) +svint64_t svzipq2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s16))) +svint16_t svzipq2(svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_bf16))) +svbfloat16_t svdup_laneq_bf16(svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_bf16))) +svbfloat16_t svdup_laneq(svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8))) +svint8_t svclamp_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32))) +svint32_t svclamp_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64))) +svint64_t svclamp_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16))) +svint16_t svclamp_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8))) +svuint8_t svclamp_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32))) +svuint32_t svclamp_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64))) +svuint64_t svclamp_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16))) +svuint16_t svclamp_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b16))) +svbool_t svpsel_lane_b16(svbool_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b32))) +svbool_t svpsel_lane_b32(svbool_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b64))) +svbool_t svpsel_lane_b64(svbool_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b8))) +svbool_t svpsel_lane_b8(svbool_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) +svuint8_t svrevd_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) +svuint32_t svrevd_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) +svuint64_t svrevd_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) +svuint16_t svrevd_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) +svbfloat16_t svrevd_bf16_m(svbfloat16_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) +svint8_t svrevd_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) +svfloat64_t svrevd_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) +svfloat32_t svrevd_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) +svfloat16_t svrevd_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) +svint32_t svrevd_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) +svint64_t svrevd_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) +svint16_t svrevd_s16_m(svint16_t, svbool_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) +svuint8_t svrevd_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) +svuint32_t svrevd_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) +svuint64_t svrevd_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) +svuint16_t svrevd_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) +svbfloat16_t svrevd_bf16_x(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) +svint8_t svrevd_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) +svfloat64_t svrevd_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) +svfloat32_t svrevd_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) +svfloat16_t svrevd_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) +svint32_t svrevd_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) +svint64_t svrevd_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) +svint16_t svrevd_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) +svuint8_t svrevd_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) +svuint32_t svrevd_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) +svuint64_t svrevd_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) +svuint16_t svrevd_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) +svbfloat16_t svrevd_bf16_z(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) +svint8_t svrevd_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) +svfloat64_t svrevd_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) +svfloat32_t svrevd_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) +svfloat16_t svrevd_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) +svint32_t svrevd_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) +svint64_t svrevd_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) +svint16_t svrevd_s16_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8))) +svint8_t svclamp(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32))) +svint32_t svclamp(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64))) +svint64_t svclamp(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16))) +svint16_t svclamp(svint16_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8))) +svuint8_t svclamp(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32))) +svuint32_t svclamp(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64))) +svuint64_t svclamp(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16))) +svuint16_t svclamp(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) +svuint8_t svrevd_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) +svuint32_t svrevd_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) +svuint64_t svrevd_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) +svuint16_t svrevd_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) +svbfloat16_t svrevd_m(svbfloat16_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) +svint8_t svrevd_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) +svfloat64_t svrevd_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) +svfloat32_t svrevd_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) +svfloat16_t svrevd_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) +svint32_t svrevd_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) +svint64_t svrevd_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) +svint16_t svrevd_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) +svuint8_t svrevd_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) +svuint32_t svrevd_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) +svuint64_t svrevd_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) +svuint16_t svrevd_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) +svbfloat16_t svrevd_x(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) +svint8_t svrevd_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) +svfloat64_t svrevd_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) +svfloat32_t svrevd_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) +svfloat16_t svrevd_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) +svint32_t svrevd_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) +svint64_t svrevd_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) 
+svint16_t svrevd_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) +svuint8_t svrevd_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) +svuint32_t svrevd_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) +svuint64_t svrevd_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) +svuint16_t svrevd_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) +svbfloat16_t svrevd_z(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) +svint8_t svrevd_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) +svfloat64_t svrevd_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) +svfloat32_t svrevd_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) +svfloat16_t svrevd_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) +svint32_t svrevd_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) +svint64_t svrevd_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) +svint16_t svrevd_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_f32))) +svfloat32_t svbfmlslb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_lane_f32))) +svfloat32_t svbfmlslb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_f32))) +svfloat32_t svbfmlslt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_lane_f32))) +svfloat32_t svbfmlslt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64))) +svfloat64_t svclamp_f64(svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32))) +svfloat32_t svclamp_f32(svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16))) +svfloat16_t svclamp_f16(svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c8))) +uint64_t svcntp_c8(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c32))) +uint64_t svcntp_c32(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c64))) +uint64_t svcntp_c64(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c16))) +uint64_t svcntp_c16(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_b))) +svboolx2_t svcreate2_b(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_b))) +svboolx4_t svcreate4_b(svbool_t, svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_f32_f16))) +svfloat32_t svdot_f32_f16(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32_s16))) +svint32_t svdot_s32_s16(svint32_t, 
svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32_u16))) +svuint32_t svdot_u32_u16(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_f32_f16))) +svfloat32_t svdot_lane_f32_f16(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32_s16))) +svint32_t svdot_lane_s32_s16(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32_u16))) +svuint32_t svdot_lane_u32_u16(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_b))) +svbool_t svget2_b(svboolx2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_b))) +svbool_t svget4_b(svboolx4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x2))) +svuint8x2_t svld1_u8_x2(svcount_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x2))) +svint8x2_t svld1_s8_x2(svcount_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x2))) +svuint64x2_t svld1_u64_x2(svcount_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x2))) +svfloat64x2_t svld1_f64_x2(svcount_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x2))) +svint64x2_t svld1_s64_x2(svcount_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x2))) +svuint16x2_t svld1_u16_x2(svcount_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x2))) +svbfloat16x2_t svld1_bf16_x2(svcount_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x2))) +svfloat16x2_t svld1_f16_x2(svcount_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x2))) +svint16x2_t svld1_s16_x2(svcount_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x2))) +svuint32x2_t svld1_u32_x2(svcount_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x2))) +svfloat32x2_t svld1_f32_x2(svcount_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x2))) +svint32x2_t svld1_s32_x2(svcount_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x4))) +svuint8x4_t svld1_u8_x4(svcount_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x4))) +svint8x4_t svld1_s8_x4(svcount_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x4))) +svuint64x4_t svld1_u64_x4(svcount_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x4))) +svfloat64x4_t svld1_f64_x4(svcount_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x4))) +svint64x4_t svld1_s64_x4(svcount_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x4))) +svuint16x4_t svld1_u16_x4(svcount_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x4))) +svbfloat16x4_t svld1_bf16_x4(svcount_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x4))) +svfloat16x4_t svld1_f16_x4(svcount_t, float16_t const *); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x4))) +svint16x4_t svld1_s16_x4(svcount_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x4))) +svuint32x4_t svld1_u32_x4(svcount_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x4))) +svfloat32x4_t svld1_f32_x4(svcount_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x4))) +svint32x4_t svld1_s32_x4(svcount_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x2))) +svuint8x2_t svld1_vnum_u8_x2(svcount_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x2))) +svint8x2_t svld1_vnum_s8_x2(svcount_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x2))) +svuint64x2_t svld1_vnum_u64_x2(svcount_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x2))) +svfloat64x2_t svld1_vnum_f64_x2(svcount_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x2))) +svint64x2_t svld1_vnum_s64_x2(svcount_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x2))) +svuint16x2_t svld1_vnum_u16_x2(svcount_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x2))) +svbfloat16x2_t svld1_vnum_bf16_x2(svcount_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x2))) +svfloat16x2_t svld1_vnum_f16_x2(svcount_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x2))) +svint16x2_t svld1_vnum_s16_x2(svcount_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x2))) +svuint32x2_t svld1_vnum_u32_x2(svcount_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x2))) +svfloat32x2_t svld1_vnum_f32_x2(svcount_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x2))) +svint32x2_t svld1_vnum_s32_x2(svcount_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x4))) +svuint8x4_t svld1_vnum_u8_x4(svcount_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x4))) +svint8x4_t svld1_vnum_s8_x4(svcount_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x4))) +svuint64x4_t svld1_vnum_u64_x4(svcount_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x4))) +svfloat64x4_t svld1_vnum_f64_x4(svcount_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x4))) +svint64x4_t svld1_vnum_s64_x4(svcount_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x4))) +svuint16x4_t svld1_vnum_u16_x4(svcount_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x4))) +svbfloat16x4_t svld1_vnum_bf16_x4(svcount_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x4))) +svfloat16x4_t 
svld1_vnum_f16_x4(svcount_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x4))) +svint16x4_t svld1_vnum_s16_x4(svcount_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x4))) +svuint32x4_t svld1_vnum_u32_x4(svcount_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x4))) +svfloat32x4_t svld1_vnum_f32_x4(svcount_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x4))) +svint32x4_t svld1_vnum_s32_x4(svcount_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x2))) +svuint8x2_t svldnt1_u8_x2(svcount_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x2))) +svint8x2_t svldnt1_s8_x2(svcount_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x2))) +svuint64x2_t svldnt1_u64_x2(svcount_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x2))) +svfloat64x2_t svldnt1_f64_x2(svcount_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x2))) +svint64x2_t svldnt1_s64_x2(svcount_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x2))) +svuint16x2_t svldnt1_u16_x2(svcount_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x2))) +svbfloat16x2_t svldnt1_bf16_x2(svcount_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x2))) +svfloat16x2_t svldnt1_f16_x2(svcount_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x2))) +svint16x2_t svldnt1_s16_x2(svcount_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x2))) +svuint32x2_t svldnt1_u32_x2(svcount_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x2))) +svfloat32x2_t svldnt1_f32_x2(svcount_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x2))) +svint32x2_t svldnt1_s32_x2(svcount_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x4))) +svuint8x4_t svldnt1_u8_x4(svcount_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x4))) +svint8x4_t svldnt1_s8_x4(svcount_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x4))) +svuint64x4_t svldnt1_u64_x4(svcount_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x4))) +svfloat64x4_t svldnt1_f64_x4(svcount_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x4))) +svint64x4_t svldnt1_s64_x4(svcount_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x4))) +svuint16x4_t svldnt1_u16_x4(svcount_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x4))) +svbfloat16x4_t svldnt1_bf16_x4(svcount_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x4))) +svfloat16x4_t svldnt1_f16_x4(svcount_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x4))) +svint16x4_t svldnt1_s16_x4(svcount_t, int16_t const 
*); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x4))) +svuint32x4_t svldnt1_u32_x4(svcount_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x4))) +svfloat32x4_t svldnt1_f32_x4(svcount_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x4))) +svint32x4_t svldnt1_s32_x4(svcount_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x2))) +svuint8x2_t svldnt1_vnum_u8_x2(svcount_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x2))) +svint8x2_t svldnt1_vnum_s8_x2(svcount_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x2))) +svuint64x2_t svldnt1_vnum_u64_x2(svcount_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x2))) +svfloat64x2_t svldnt1_vnum_f64_x2(svcount_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x2))) +svint64x2_t svldnt1_vnum_s64_x2(svcount_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x2))) +svuint16x2_t svldnt1_vnum_u16_x2(svcount_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x2))) +svbfloat16x2_t svldnt1_vnum_bf16_x2(svcount_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x2))) +svfloat16x2_t svldnt1_vnum_f16_x2(svcount_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x2))) +svint16x2_t svldnt1_vnum_s16_x2(svcount_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x2))) +svuint32x2_t svldnt1_vnum_u32_x2(svcount_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x2))) +svfloat32x2_t svldnt1_vnum_f32_x2(svcount_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x2))) +svint32x2_t svldnt1_vnum_s32_x2(svcount_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x4))) +svuint8x4_t svldnt1_vnum_u8_x4(svcount_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x4))) +svint8x4_t svldnt1_vnum_s8_x4(svcount_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x4))) +svuint64x4_t svldnt1_vnum_u64_x4(svcount_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x4))) +svfloat64x4_t svldnt1_vnum_f64_x4(svcount_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x4))) +svint64x4_t svldnt1_vnum_s64_x4(svcount_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x4))) +svuint16x4_t svldnt1_vnum_u16_x4(svcount_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x4))) +svbfloat16x4_t svldnt1_vnum_bf16_x4(svcount_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x4))) +svfloat16x4_t svldnt1_vnum_f16_x4(svcount_t, float16_t const *, int64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x4))) +svint16x4_t svldnt1_vnum_s16_x4(svcount_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x4))) +svuint32x4_t svldnt1_vnum_u32_x4(svcount_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x4))) +svfloat32x4_t svldnt1_vnum_f32_x4(svcount_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x4))) +svint32x4_t svldnt1_vnum_s32_x4(svcount_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c8))) +svbool_t svpext_lane_c8(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c32))) +svbool_t svpext_lane_c32(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c64))) +svbool_t svpext_lane_c64(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c16))) +svbool_t svpext_lane_c16(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c8_x2))) +svboolx2_t svpext_lane_c8_x2(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c32_x2))) +svboolx2_t svpext_lane_c32_x2(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c64_x2))) +svboolx2_t svpext_lane_c64_x2(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpext_lane_c16_x2))) +svboolx2_t svpext_lane_c16_x2(svcount_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_c))) +svcount_t svpfalse_c(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c16))) +svcount_t svpsel_lane_c16(svcount_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c32))) +svcount_t svpsel_lane_c32(svcount_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c64))) +svcount_t svpsel_lane_c64(svcount_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_c8))) +svcount_t svpsel_lane_c8(svcount_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c8))) +svcount_t svptrue_c8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c32))) +svcount_t svptrue_c32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c64))) +svcount_t svptrue_c64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c16))) +svcount_t svptrue_c16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s32_x2))) +svint16_t svqcvtn_s16_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s32_x2))) +svuint16_t svqcvtn_u16_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u32_x2))) +svuint16_t svqcvtn_u16_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s32_x2))) +svint16_t svqrshrn_n_s16_s32_x2(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u32_x2))) +svuint16_t svqrshrn_n_u16_u32_x2(svuint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s32_x2))) +svuint16_t 
svqrshrun_n_u16_s32_x2(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b))) +svbool_t svreinterpret_b(svcount_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c))) +svcount_t svreinterpret_c(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_b))) +svboolx2_t svset2_b(svboolx2_t, uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_b))) +svboolx4_t svset4_b(svboolx4_t, uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x2))) +void svst1_u8_x2(svcount_t, uint8_t *, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x2))) +void svst1_s8_x2(svcount_t, int8_t *, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x2))) +void svst1_u64_x2(svcount_t, uint64_t *, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x2))) +void svst1_f64_x2(svcount_t, float64_t *, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x2))) +void svst1_s64_x2(svcount_t, int64_t *, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x2))) +void svst1_u16_x2(svcount_t, uint16_t *, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x2))) +void svst1_bf16_x2(svcount_t, bfloat16_t *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x2))) +void svst1_f16_x2(svcount_t, float16_t *, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x2))) +void svst1_s16_x2(svcount_t, int16_t *, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x2))) +void svst1_u32_x2(svcount_t, uint32_t *, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x2))) +void svst1_f32_x2(svcount_t, float32_t *, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x2))) +void svst1_s32_x2(svcount_t, int32_t *, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x4))) +void svst1_u8_x4(svcount_t, uint8_t *, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x4))) +void svst1_s8_x4(svcount_t, int8_t *, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x4))) +void svst1_u64_x4(svcount_t, uint64_t *, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x4))) +void svst1_f64_x4(svcount_t, float64_t *, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x4))) +void svst1_s64_x4(svcount_t, int64_t *, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x4))) +void svst1_u16_x4(svcount_t, uint16_t *, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x4))) +void svst1_bf16_x4(svcount_t, bfloat16_t *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x4))) +void svst1_f16_x4(svcount_t, float16_t *, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x4))) +void svst1_s16_x4(svcount_t, int16_t *, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x4))) +void svst1_u32_x4(svcount_t, uint32_t *, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x4))) 
+void svst1_f32_x4(svcount_t, float32_t *, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x4))) +void svst1_s32_x4(svcount_t, int32_t *, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x2))) +void svst1_vnum_u8_x2(svcount_t, uint8_t *, int64_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x2))) +void svst1_vnum_s8_x2(svcount_t, int8_t *, int64_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x2))) +void svst1_vnum_u64_x2(svcount_t, uint64_t *, int64_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x2))) +void svst1_vnum_f64_x2(svcount_t, float64_t *, int64_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x2))) +void svst1_vnum_s64_x2(svcount_t, int64_t *, int64_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x2))) +void svst1_vnum_u16_x2(svcount_t, uint16_t *, int64_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x2))) +void svst1_vnum_bf16_x2(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x2))) +void svst1_vnum_f16_x2(svcount_t, float16_t *, int64_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x2))) +void svst1_vnum_s16_x2(svcount_t, int16_t *, int64_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x2))) +void svst1_vnum_u32_x2(svcount_t, uint32_t *, int64_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x2))) +void svst1_vnum_f32_x2(svcount_t, float32_t *, int64_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x2))) +void svst1_vnum_s32_x2(svcount_t, int32_t *, int64_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x4))) +void svst1_vnum_u8_x4(svcount_t, uint8_t *, int64_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x4))) +void svst1_vnum_s8_x4(svcount_t, int8_t *, int64_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x4))) +void svst1_vnum_u64_x4(svcount_t, uint64_t *, int64_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x4))) +void svst1_vnum_f64_x4(svcount_t, float64_t *, int64_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x4))) +void svst1_vnum_s64_x4(svcount_t, int64_t *, int64_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x4))) +void svst1_vnum_u16_x4(svcount_t, uint16_t *, int64_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x4))) +void svst1_vnum_bf16_x4(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x4))) +void svst1_vnum_f16_x4(svcount_t, float16_t *, int64_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x4))) +void svst1_vnum_s16_x4(svcount_t, int16_t *, int64_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x4))) +void svst1_vnum_u32_x4(svcount_t, uint32_t *, int64_t, svuint32x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x4))) +void svst1_vnum_f32_x4(svcount_t, float32_t *, int64_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x4))) +void svst1_vnum_s32_x4(svcount_t, int32_t *, int64_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x2))) +void svstnt1_u8_x2(svcount_t, uint8_t *, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x2))) +void svstnt1_s8_x2(svcount_t, int8_t *, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x2))) +void svstnt1_u64_x2(svcount_t, uint64_t *, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x2))) +void svstnt1_f64_x2(svcount_t, float64_t *, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x2))) +void svstnt1_s64_x2(svcount_t, int64_t *, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x2))) +void svstnt1_u16_x2(svcount_t, uint16_t *, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x2))) +void svstnt1_bf16_x2(svcount_t, bfloat16_t *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x2))) +void svstnt1_f16_x2(svcount_t, float16_t *, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x2))) +void svstnt1_s16_x2(svcount_t, int16_t *, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x2))) +void svstnt1_u32_x2(svcount_t, uint32_t *, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x2))) +void svstnt1_f32_x2(svcount_t, float32_t *, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x2))) +void svstnt1_s32_x2(svcount_t, int32_t *, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x4))) +void svstnt1_u8_x4(svcount_t, uint8_t *, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x4))) +void svstnt1_s8_x4(svcount_t, int8_t *, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x4))) +void svstnt1_u64_x4(svcount_t, uint64_t *, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x4))) +void svstnt1_f64_x4(svcount_t, float64_t *, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x4))) +void svstnt1_s64_x4(svcount_t, int64_t *, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x4))) +void svstnt1_u16_x4(svcount_t, uint16_t *, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x4))) +void svstnt1_bf16_x4(svcount_t, bfloat16_t *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x4))) +void svstnt1_f16_x4(svcount_t, float16_t *, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x4))) +void svstnt1_s16_x4(svcount_t, int16_t *, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x4))) +void svstnt1_u32_x4(svcount_t, uint32_t *, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x4))) +void svstnt1_f32_x4(svcount_t, float32_t *, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x4))) +void 
svstnt1_s32_x4(svcount_t, int32_t *, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x2))) +void svstnt1_vnum_u8_x2(svcount_t, uint8_t *, int64_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x2))) +void svstnt1_vnum_s8_x2(svcount_t, int8_t *, int64_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x2))) +void svstnt1_vnum_u64_x2(svcount_t, uint64_t *, int64_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x2))) +void svstnt1_vnum_f64_x2(svcount_t, float64_t *, int64_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x2))) +void svstnt1_vnum_s64_x2(svcount_t, int64_t *, int64_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x2))) +void svstnt1_vnum_u16_x2(svcount_t, uint16_t *, int64_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x2))) +void svstnt1_vnum_bf16_x2(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x2))) +void svstnt1_vnum_f16_x2(svcount_t, float16_t *, int64_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x2))) +void svstnt1_vnum_s16_x2(svcount_t, int16_t *, int64_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x2))) +void svstnt1_vnum_u32_x2(svcount_t, uint32_t *, int64_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x2))) +void svstnt1_vnum_f32_x2(svcount_t, float32_t *, int64_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x2))) +void svstnt1_vnum_s32_x2(svcount_t, int32_t *, int64_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x4))) +void svstnt1_vnum_u8_x4(svcount_t, uint8_t *, int64_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x4))) +void svstnt1_vnum_s8_x4(svcount_t, int8_t *, int64_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x4))) +void svstnt1_vnum_u64_x4(svcount_t, uint64_t *, int64_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x4))) +void svstnt1_vnum_f64_x4(svcount_t, float64_t *, int64_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x4))) +void svstnt1_vnum_s64_x4(svcount_t, int64_t *, int64_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x4))) +void svstnt1_vnum_u16_x4(svcount_t, uint16_t *, int64_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x4))) +void svstnt1_vnum_bf16_x4(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x4))) +void svstnt1_vnum_f16_x4(svcount_t, float16_t *, int64_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x4))) +void svstnt1_vnum_s16_x4(svcount_t, int16_t *, int64_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x4))) +void svstnt1_vnum_u32_x4(svcount_t, uint32_t *, int64_t, svuint32x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x4))) +void svstnt1_vnum_f32_x4(svcount_t, float32_t *, int64_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x4))) +void svstnt1_vnum_s32_x4(svcount_t, int32_t *, int64_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_b))) +svboolx2_t svundef2_b(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_b))) +svboolx4_t svundef4_b(); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_s64))) +svcount_t svwhilege_c8_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_s64))) +svcount_t svwhilege_c32_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_s64))) +svcount_t svwhilege_c64_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_s64))) +svcount_t svwhilege_c16_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_u64))) +svcount_t svwhilege_c8_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_u64))) +svcount_t svwhilege_c32_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_u64))) +svcount_t svwhilege_c64_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_u64))) +svcount_t svwhilege_c16_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64_x2))) +svboolx2_t svwhilege_b8_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64_x2))) +svboolx2_t svwhilege_b32_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64_x2))) +svboolx2_t svwhilege_b64_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64_x2))) +svboolx2_t svwhilege_b16_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64_x2))) +svboolx2_t svwhilege_b8_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64_x2))) +svboolx2_t svwhilege_b32_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64_x2))) +svboolx2_t svwhilege_b64_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64_x2))) +svboolx2_t svwhilege_b16_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_s64))) +svcount_t svwhilegt_c8_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_s64))) +svcount_t svwhilegt_c32_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_s64))) +svcount_t svwhilegt_c64_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_s64))) +svcount_t svwhilegt_c16_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_u64))) +svcount_t svwhilegt_c8_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_u64))) +svcount_t svwhilegt_c32_u64(uint64_t, 
uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_u64))) +svcount_t svwhilegt_c64_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_u64))) +svcount_t svwhilegt_c16_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64_x2))) +svboolx2_t svwhilegt_b8_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64_x2))) +svboolx2_t svwhilegt_b32_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64_x2))) +svboolx2_t svwhilegt_b64_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64_x2))) +svboolx2_t svwhilegt_b16_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64_x2))) +svboolx2_t svwhilegt_b8_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64_x2))) +svboolx2_t svwhilegt_b32_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64_x2))) +svboolx2_t svwhilegt_b64_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64_x2))) +svboolx2_t svwhilegt_b16_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_s64))) +svcount_t svwhilele_c8_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_s64))) +svcount_t svwhilele_c32_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_s64))) +svcount_t svwhilele_c64_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_s64))) +svcount_t svwhilele_c16_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_u64))) +svcount_t svwhilele_c8_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_u64))) +svcount_t svwhilele_c32_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_u64))) +svcount_t svwhilele_c64_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_u64))) +svcount_t svwhilele_c16_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64_x2))) +svboolx2_t svwhilele_b8_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64_x2))) +svboolx2_t svwhilele_b32_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64_x2))) +svboolx2_t svwhilele_b64_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64_x2))) +svboolx2_t svwhilele_b16_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64_x2))) +svboolx2_t svwhilele_b8_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64_x2))) +svboolx2_t svwhilele_b32_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64_x2))) +svboolx2_t svwhilele_b64_u64_x2(uint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64_x2))) +svboolx2_t svwhilele_b16_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_u64))) +svcount_t svwhilelt_c8_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_u64))) +svcount_t svwhilelt_c32_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_u64))) +svcount_t svwhilelt_c64_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_u64))) +svcount_t svwhilelt_c16_u64(uint64_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_s64))) +svcount_t svwhilelt_c8_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_s64))) +svcount_t svwhilelt_c32_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_s64))) +svcount_t svwhilelt_c64_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_s64))) +svcount_t svwhilelt_c16_s64(int64_t, int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64_x2))) +svboolx2_t svwhilelt_b8_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64_x2))) +svboolx2_t svwhilelt_b32_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64_x2))) +svboolx2_t svwhilelt_b64_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64_x2))) +svboolx2_t svwhilelt_b16_u64_x2(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64_x2))) +svboolx2_t svwhilelt_b8_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64_x2))) +svboolx2_t svwhilelt_b32_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64_x2))) +svboolx2_t svwhilelt_b64_s64_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64_x2))) +svboolx2_t svwhilelt_b16_s64_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_f32))) +svfloat32_t svbfmlslb(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_lane_f32))) +svfloat32_t svbfmlslb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_f32))) +svfloat32_t svbfmlslt(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslt_lane_f32))) +svfloat32_t svbfmlslt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f64))) +svfloat64_t svclamp(svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32))) +svfloat32_t svclamp(svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16))) +svfloat16_t svclamp(svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_b))) +svboolx2_t svcreate2(svbool_t, svbool_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_b))) +svboolx4_t svcreate4(svbool_t, svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_f32_f16))) +svfloat32_t svdot(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32_s16))) +svint32_t svdot(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32_u16))) +svuint32_t svdot(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_f32_f16))) +svfloat32_t svdot_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32_s16))) +svint32_t svdot_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32_u16))) +svuint32_t svdot_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_b))) +svbool_t svget2(svboolx2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_b))) +svbool_t svget4(svboolx4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x2))) +svuint8x2_t svld1_x2(svcount_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x2))) +svint8x2_t svld1_x2(svcount_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x2))) +svuint64x2_t svld1_x2(svcount_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x2))) +svfloat64x2_t svld1_x2(svcount_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x2))) +svint64x2_t svld1_x2(svcount_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x2))) +svuint16x2_t svld1_x2(svcount_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x2))) +svbfloat16x2_t svld1_x2(svcount_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x2))) +svfloat16x2_t svld1_x2(svcount_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x2))) +svint16x2_t svld1_x2(svcount_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x2))) +svuint32x2_t svld1_x2(svcount_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x2))) +svfloat32x2_t svld1_x2(svcount_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x2))) +svint32x2_t svld1_x2(svcount_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8_x4))) +svuint8x4_t svld1_x4(svcount_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8_x4))) +svint8x4_t svld1_x4(svcount_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64_x4))) +svuint64x4_t svld1_x4(svcount_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64_x4))) +svfloat64x4_t svld1_x4(svcount_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64_x4))) +svint64x4_t svld1_x4(svcount_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16_x4))) +svuint16x4_t svld1_x4(svcount_t, 
uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16_x4))) +svbfloat16x4_t svld1_x4(svcount_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16_x4))) +svfloat16x4_t svld1_x4(svcount_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16_x4))) +svint16x4_t svld1_x4(svcount_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32_x4))) +svuint32x4_t svld1_x4(svcount_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32_x4))) +svfloat32x4_t svld1_x4(svcount_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32_x4))) +svint32x4_t svld1_x4(svcount_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x2))) +svuint8x2_t svld1_vnum_x2(svcount_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x2))) +svint8x2_t svld1_vnum_x2(svcount_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x2))) +svuint64x2_t svld1_vnum_x2(svcount_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x2))) +svfloat64x2_t svld1_vnum_x2(svcount_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x2))) +svint64x2_t svld1_vnum_x2(svcount_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x2))) +svuint16x2_t svld1_vnum_x2(svcount_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x2))) +svbfloat16x2_t svld1_vnum_x2(svcount_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x2))) +svfloat16x2_t svld1_vnum_x2(svcount_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x2))) +svint16x2_t svld1_vnum_x2(svcount_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x2))) +svuint32x2_t svld1_vnum_x2(svcount_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x2))) +svfloat32x2_t svld1_vnum_x2(svcount_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x2))) +svint32x2_t svld1_vnum_x2(svcount_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8_x4))) +svuint8x4_t svld1_vnum_x4(svcount_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8_x4))) +svint8x4_t svld1_vnum_x4(svcount_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64_x4))) +svuint64x4_t svld1_vnum_x4(svcount_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64_x4))) +svfloat64x4_t svld1_vnum_x4(svcount_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64_x4))) +svint64x4_t svld1_vnum_x4(svcount_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16_x4))) +svuint16x4_t svld1_vnum_x4(svcount_t, uint16_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16_x4))) +svbfloat16x4_t svld1_vnum_x4(svcount_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16_x4))) +svfloat16x4_t svld1_vnum_x4(svcount_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16_x4))) +svint16x4_t svld1_vnum_x4(svcount_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32_x4))) +svuint32x4_t svld1_vnum_x4(svcount_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32_x4))) +svfloat32x4_t svld1_vnum_x4(svcount_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32_x4))) +svint32x4_t svld1_vnum_x4(svcount_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x2))) +svuint8x2_t svldnt1_x2(svcount_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x2))) +svint8x2_t svldnt1_x2(svcount_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x2))) +svuint64x2_t svldnt1_x2(svcount_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x2))) +svfloat64x2_t svldnt1_x2(svcount_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x2))) +svint64x2_t svldnt1_x2(svcount_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x2))) +svuint16x2_t svldnt1_x2(svcount_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x2))) +svbfloat16x2_t svldnt1_x2(svcount_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x2))) +svfloat16x2_t svldnt1_x2(svcount_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x2))) +svint16x2_t svldnt1_x2(svcount_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x2))) +svuint32x2_t svldnt1_x2(svcount_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x2))) +svfloat32x2_t svldnt1_x2(svcount_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x2))) +svint32x2_t svldnt1_x2(svcount_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8_x4))) +svuint8x4_t svldnt1_x4(svcount_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8_x4))) +svint8x4_t svldnt1_x4(svcount_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64_x4))) +svuint64x4_t svldnt1_x4(svcount_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64_x4))) +svfloat64x4_t svldnt1_x4(svcount_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64_x4))) +svint64x4_t svldnt1_x4(svcount_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16_x4))) +svuint16x4_t svldnt1_x4(svcount_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16_x4))) +svbfloat16x4_t svldnt1_x4(svcount_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16_x4))) +svfloat16x4_t 
svldnt1_x4(svcount_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16_x4))) +svint16x4_t svldnt1_x4(svcount_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32_x4))) +svuint32x4_t svldnt1_x4(svcount_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32_x4))) +svfloat32x4_t svldnt1_x4(svcount_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32_x4))) +svint32x4_t svldnt1_x4(svcount_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x2))) +svuint8x2_t svldnt1_vnum_x2(svcount_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x2))) +svint8x2_t svldnt1_vnum_x2(svcount_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x2))) +svuint64x2_t svldnt1_vnum_x2(svcount_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x2))) +svfloat64x2_t svldnt1_vnum_x2(svcount_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x2))) +svint64x2_t svldnt1_vnum_x2(svcount_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x2))) +svuint16x2_t svldnt1_vnum_x2(svcount_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x2))) +svbfloat16x2_t svldnt1_vnum_x2(svcount_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x2))) +svfloat16x2_t svldnt1_vnum_x2(svcount_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x2))) +svint16x2_t svldnt1_vnum_x2(svcount_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x2))) +svuint32x2_t svldnt1_vnum_x2(svcount_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x2))) +svfloat32x2_t svldnt1_vnum_x2(svcount_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x2))) +svint32x2_t svldnt1_vnum_x2(svcount_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8_x4))) +svuint8x4_t svldnt1_vnum_x4(svcount_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8_x4))) +svint8x4_t svldnt1_vnum_x4(svcount_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64_x4))) +svuint64x4_t svldnt1_vnum_x4(svcount_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64_x4))) +svfloat64x4_t svldnt1_vnum_x4(svcount_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64_x4))) +svint64x4_t svldnt1_vnum_x4(svcount_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16_x4))) +svuint16x4_t svldnt1_vnum_x4(svcount_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16_x4))) +svbfloat16x4_t svldnt1_vnum_x4(svcount_t, bfloat16_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16_x4))) +svfloat16x4_t svldnt1_vnum_x4(svcount_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16_x4))) +svint16x4_t svldnt1_vnum_x4(svcount_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32_x4))) +svuint32x4_t svldnt1_vnum_x4(svcount_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x4))) +svfloat32x4_t svldnt1_vnum_x4(svcount_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x4))) +svint32x4_t svldnt1_vnum_x4(svcount_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s32_x2))) +svint16_t svqcvtn_s16(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s32_x2))) +svuint16_t svqcvtn_u16(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u32_x2))) +svuint16_t svqcvtn_u16(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s32_x2))) +svint16_t svqrshrn_s16(svint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u32_x2))) +svuint16_t svqrshrn_u16(svuint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s32_x2))) +svuint16_t svqrshrun_u16(svint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b))) +svbool_t svreinterpret(svcount_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c))) +svcount_t svreinterpret(svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_b))) +svboolx2_t svset2(svboolx2_t, uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_b))) +svboolx4_t svset4(svboolx4_t, uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x2))) +void svst1(svcount_t, uint8_t *, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x2))) +void svst1(svcount_t, int8_t *, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x2))) +void svst1(svcount_t, uint64_t *, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x2))) +void svst1(svcount_t, float64_t *, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x2))) +void svst1(svcount_t, int64_t *, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x2))) +void svst1(svcount_t, uint16_t *, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x2))) +void svst1(svcount_t, bfloat16_t *, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x2))) +void svst1(svcount_t, float16_t *, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x2))) +void svst1(svcount_t, int16_t *, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x2))) +void svst1(svcount_t, uint32_t *, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x2))) +void svst1(svcount_t, float32_t *, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x2))) +void svst1(svcount_t, int32_t *, 
svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8_x4))) +void svst1(svcount_t, uint8_t *, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8_x4))) +void svst1(svcount_t, int8_t *, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64_x4))) +void svst1(svcount_t, uint64_t *, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64_x4))) +void svst1(svcount_t, float64_t *, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64_x4))) +void svst1(svcount_t, int64_t *, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16_x4))) +void svst1(svcount_t, uint16_t *, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16_x4))) +void svst1(svcount_t, bfloat16_t *, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16_x4))) +void svst1(svcount_t, float16_t *, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16_x4))) +void svst1(svcount_t, int16_t *, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32_x4))) +void svst1(svcount_t, uint32_t *, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32_x4))) +void svst1(svcount_t, float32_t *, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32_x4))) +void svst1(svcount_t, int32_t *, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x2))) +void svst1_vnum(svcount_t, uint8_t *, int64_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x2))) +void svst1_vnum(svcount_t, int8_t *, int64_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x2))) +void svst1_vnum(svcount_t, uint64_t *, int64_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x2))) +void svst1_vnum(svcount_t, float64_t *, int64_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x2))) +void svst1_vnum(svcount_t, int64_t *, int64_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x2))) +void svst1_vnum(svcount_t, uint16_t *, int64_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x2))) +void svst1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x2))) +void svst1_vnum(svcount_t, float16_t *, int64_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x2))) +void svst1_vnum(svcount_t, int16_t *, int64_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x2))) +void svst1_vnum(svcount_t, uint32_t *, int64_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x2))) +void svst1_vnum(svcount_t, float32_t *, int64_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x2))) +void svst1_vnum(svcount_t, int32_t *, int64_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8_x4))) +void svst1_vnum(svcount_t, uint8_t *, int64_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8_x4))) +void 
svst1_vnum(svcount_t, int8_t *, int64_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64_x4))) +void svst1_vnum(svcount_t, uint64_t *, int64_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64_x4))) +void svst1_vnum(svcount_t, float64_t *, int64_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64_x4))) +void svst1_vnum(svcount_t, int64_t *, int64_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16_x4))) +void svst1_vnum(svcount_t, uint16_t *, int64_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16_x4))) +void svst1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16_x4))) +void svst1_vnum(svcount_t, float16_t *, int64_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16_x4))) +void svst1_vnum(svcount_t, int16_t *, int64_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32_x4))) +void svst1_vnum(svcount_t, uint32_t *, int64_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32_x4))) +void svst1_vnum(svcount_t, float32_t *, int64_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32_x4))) +void svst1_vnum(svcount_t, int32_t *, int64_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x2))) +void svstnt1(svcount_t, uint8_t *, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x2))) +void svstnt1(svcount_t, int8_t *, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x2))) +void svstnt1(svcount_t, uint64_t *, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x2))) +void svstnt1(svcount_t, float64_t *, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x2))) +void svstnt1(svcount_t, int64_t *, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x2))) +void svstnt1(svcount_t, uint16_t *, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x2))) +void svstnt1(svcount_t, bfloat16_t *, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x2))) +void svstnt1(svcount_t, float16_t *, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x2))) +void svstnt1(svcount_t, int16_t *, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x2))) +void svstnt1(svcount_t, uint32_t *, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x2))) +void svstnt1(svcount_t, float32_t *, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x2))) +void svstnt1(svcount_t, int32_t *, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8_x4))) +void svstnt1(svcount_t, uint8_t *, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8_x4))) +void svstnt1(svcount_t, int8_t *, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64_x4))) +void svstnt1(svcount_t, uint64_t *, svuint64x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64_x4))) +void svstnt1(svcount_t, float64_t *, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64_x4))) +void svstnt1(svcount_t, int64_t *, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16_x4))) +void svstnt1(svcount_t, uint16_t *, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16_x4))) +void svstnt1(svcount_t, bfloat16_t *, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16_x4))) +void svstnt1(svcount_t, float16_t *, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16_x4))) +void svstnt1(svcount_t, int16_t *, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32_x4))) +void svstnt1(svcount_t, uint32_t *, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32_x4))) +void svstnt1(svcount_t, float32_t *, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32_x4))) +void svstnt1(svcount_t, int32_t *, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x2))) +void svstnt1_vnum(svcount_t, uint8_t *, int64_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x2))) +void svstnt1_vnum(svcount_t, int8_t *, int64_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x2))) +void svstnt1_vnum(svcount_t, uint64_t *, int64_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x2))) +void svstnt1_vnum(svcount_t, float64_t *, int64_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x2))) +void svstnt1_vnum(svcount_t, int64_t *, int64_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x2))) +void svstnt1_vnum(svcount_t, uint16_t *, int64_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x2))) +void svstnt1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x2))) +void svstnt1_vnum(svcount_t, float16_t *, int64_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x2))) +void svstnt1_vnum(svcount_t, int16_t *, int64_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x2))) +void svstnt1_vnum(svcount_t, uint32_t *, int64_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x2))) +void svstnt1_vnum(svcount_t, float32_t *, int64_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x2))) +void svstnt1_vnum(svcount_t, int32_t *, int64_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8_x4))) +void svstnt1_vnum(svcount_t, uint8_t *, int64_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8_x4))) +void svstnt1_vnum(svcount_t, int8_t *, int64_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64_x4))) +void svstnt1_vnum(svcount_t, uint64_t *, int64_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64_x4))) +void svstnt1_vnum(svcount_t, 
float64_t *, int64_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64_x4))) +void svstnt1_vnum(svcount_t, int64_t *, int64_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16_x4))) +void svstnt1_vnum(svcount_t, uint16_t *, int64_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16_x4))) +void svstnt1_vnum(svcount_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16_x4))) +void svstnt1_vnum(svcount_t, float16_t *, int64_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16_x4))) +void svstnt1_vnum(svcount_t, int16_t *, int64_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32_x4))) +void svstnt1_vnum(svcount_t, uint32_t *, int64_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32_x4))) +void svstnt1_vnum(svcount_t, float32_t *, int64_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32_x4))) +void svstnt1_vnum(svcount_t, int32_t *, int64_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_s64))) +svcount_t svwhilege_c8(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_s64))) +svcount_t svwhilege_c32(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_s64))) +svcount_t svwhilege_c64(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_s64))) +svcount_t svwhilege_c16(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c8_u64))) +svcount_t svwhilege_c8(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c32_u64))) +svcount_t svwhilege_c32(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c64_u64))) +svcount_t svwhilege_c64(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_c16_u64))) +svcount_t svwhilege_c16(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64_x2))) +svboolx2_t svwhilege_b8_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64_x2))) +svboolx2_t svwhilege_b32_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64_x2))) +svboolx2_t svwhilege_b64_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64_x2))) +svboolx2_t svwhilege_b16_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64_x2))) +svboolx2_t svwhilege_b8_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64_x2))) +svboolx2_t svwhilege_b32_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64_x2))) +svboolx2_t svwhilege_b64_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64_x2))) +svboolx2_t svwhilege_b16_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_s64))) +svcount_t svwhilegt_c8(int64_t, int64_t, 
uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_s64))) +svcount_t svwhilegt_c32(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_s64))) +svcount_t svwhilegt_c64(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_s64))) +svcount_t svwhilegt_c16(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c8_u64))) +svcount_t svwhilegt_c8(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c32_u64))) +svcount_t svwhilegt_c32(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c64_u64))) +svcount_t svwhilegt_c64(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_c16_u64))) +svcount_t svwhilegt_c16(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64_x2))) +svboolx2_t svwhilegt_b8_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64_x2))) +svboolx2_t svwhilegt_b32_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64_x2))) +svboolx2_t svwhilegt_b64_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64_x2))) +svboolx2_t svwhilegt_b16_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64_x2))) +svboolx2_t svwhilegt_b8_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64_x2))) +svboolx2_t svwhilegt_b32_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64_x2))) +svboolx2_t svwhilegt_b64_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64_x2))) +svboolx2_t svwhilegt_b16_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_s64))) +svcount_t svwhilele_c8(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_s64))) +svcount_t svwhilele_c32(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_s64))) +svcount_t svwhilele_c64(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_s64))) +svcount_t svwhilele_c16(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c8_u64))) +svcount_t svwhilele_c8(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c32_u64))) +svcount_t svwhilele_c32(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c64_u64))) +svcount_t svwhilele_c64(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_c16_u64))) +svcount_t svwhilele_c16(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64_x2))) +svboolx2_t svwhilele_b8_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64_x2))) +svboolx2_t svwhilele_b32_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64_x2))) +svboolx2_t svwhilele_b64_x2(int64_t, int64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64_x2))) +svboolx2_t svwhilele_b16_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64_x2))) +svboolx2_t svwhilele_b8_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64_x2))) +svboolx2_t svwhilele_b32_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64_x2))) +svboolx2_t svwhilele_b64_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64_x2))) +svboolx2_t svwhilele_b16_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_u64))) +svcount_t svwhilelt_c8(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_u64))) +svcount_t svwhilelt_c32(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_u64))) +svcount_t svwhilelt_c64(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_u64))) +svcount_t svwhilelt_c16(uint64_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c8_s64))) +svcount_t svwhilelt_c8(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c32_s64))) +svcount_t svwhilelt_c32(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c64_s64))) +svcount_t svwhilelt_c64(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_c16_s64))) +svcount_t svwhilelt_c16(int64_t, int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64_x2))) +svboolx2_t svwhilelt_b8_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64_x2))) +svboolx2_t svwhilelt_b32_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64_x2))) +svboolx2_t svwhilelt_b64_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64_x2))) +svboolx2_t svwhilelt_b16_x2(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64_x2))) +svboolx2_t svwhilelt_b8_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64_x2))) +svboolx2_t svwhilelt_b32_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64_x2))) +svboolx2_t svwhilelt_b64_x2(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64_x2))) +svboolx2_t svwhilelt_b16_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u8))) +svuint8_t svdup_laneq_u8(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s8))) +svint8_t svdup_laneq_s8(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u64))) +svuint64_t svdup_laneq_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f64))) +svfloat64_t svdup_laneq_f64(svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s64))) +svint64_t svdup_laneq_s64(svint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u16))) +svuint16_t svdup_laneq_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f16))) +svfloat16_t svdup_laneq_f16(svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s16))) +svint16_t svdup_laneq_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u32))) +svuint32_t svdup_laneq_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f32))) +svfloat32_t svdup_laneq_f32(svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s32))) +svint32_t svdup_laneq_s32(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u8))) +svuint8_t svdup_laneq(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s8))) +svint8_t svdup_laneq(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u64))) +svuint64_t svdup_laneq(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f64))) +svfloat64_t svdup_laneq(svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s64))) +svint64_t svdup_laneq(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u16))) +svuint16_t svdup_laneq(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f16))) +svfloat16_t svdup_laneq(svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s16))) +svint16_t svdup_laneq(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u32))) +svuint32_t svdup_laneq(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f32))) +svfloat32_t svdup_laneq(svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s32))) +svint32_t svdup_laneq(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) +svint8_t svaba_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) +svint32_t svaba_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64))) +svint64_t svaba_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16))) +svint16_t svaba_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8))) +svuint8_t svaba_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32))) +svuint32_t svaba_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64))) +svuint64_t svaba_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16))) +svuint16_t svaba_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8))) +svint8_t svaba_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32))) +svint32_t svaba_s32(svint32_t, svint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64))) +svint64_t svaba_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16))) +svint16_t svaba_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8))) +svuint8_t svaba_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32))) +svuint32_t svaba_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64))) +svuint64_t svaba_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16))) +svuint16_t svaba_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32))) +svint32_t svabalb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64))) +svint64_t svabalb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16))) +svint16_t svabalb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32))) +svuint32_t svabalb_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64))) +svuint64_t svabalb_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16))) +svuint16_t svabalb_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32))) +svint32_t svabalb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64))) +svint64_t svabalb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16))) +svint16_t svabalb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32))) +svuint32_t svabalb_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64))) +svuint64_t svabalb_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16))) +svuint16_t svabalb_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32))) +svint32_t svabalt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64))) +svint64_t svabalt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16))) +svint16_t svabalt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32))) +svuint32_t svabalt_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64))) +svuint64_t svabalt_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16))) +svuint16_t svabalt_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32))) +svint32_t svabalt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64))) +svint64_t svabalt_s64(svint64_t, svint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16))) +svint16_t svabalt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32))) +svuint32_t svabalt_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64))) +svuint64_t svabalt_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16))) +svuint16_t svabalt_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32))) +svint32_t svabdlb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64))) +svint64_t svabdlb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16))) +svint16_t svabdlb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32))) +svuint32_t svabdlb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64))) +svuint64_t svabdlb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16))) +svuint16_t svabdlb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32))) +svint32_t svabdlb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64))) +svint64_t svabdlb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16))) +svint16_t svabdlb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32))) +svuint32_t svabdlb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64))) +svuint64_t svabdlb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16))) +svuint16_t svabdlb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32))) +svint32_t svabdlt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64))) +svint64_t svabdlt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16))) +svint16_t svabdlt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32))) +svuint32_t svabdlt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64))) +svuint64_t svabdlt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16))) +svuint16_t svabdlt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32))) +svint32_t svabdlt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64))) +svint64_t svabdlt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16))) +svint16_t svabdlt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32))) +svuint32_t svabdlt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64))) +svuint64_t svabdlt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16))) +svuint16_t svabdlt_u16(svuint8_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m))) +svint32_t svadalp_s32_m(svbool_t, svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m))) +svint64_t svadalp_s64_m(svbool_t, svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m))) +svint16_t svadalp_s16_m(svbool_t, svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x))) +svint32_t svadalp_s32_x(svbool_t, svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x))) +svint64_t svadalp_s64_x(svbool_t, svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x))) +svint16_t svadalp_s16_x(svbool_t, svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z))) +svint32_t svadalp_s32_z(svbool_t, svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z))) +svint64_t svadalp_s64_z(svbool_t, svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z))) +svint16_t svadalp_s16_z(svbool_t, svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m))) +svuint32_t svadalp_u32_m(svbool_t, svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m))) +svuint64_t svadalp_u64_m(svbool_t, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m))) +svuint16_t svadalp_u16_m(svbool_t, svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x))) +svuint32_t svadalp_u32_x(svbool_t, svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x))) +svuint64_t svadalp_u64_x(svbool_t, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x))) +svuint16_t svadalp_u16_x(svbool_t, svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z))) +svuint32_t svadalp_u32_z(svbool_t, svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) +svuint64_t svadalp_u64_z(svbool_t, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) +svuint16_t svadalp_u16_z(svbool_t, svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) +svuint32_t svadclb_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) +svuint64_t svadclb_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) +svuint32_t svadclb_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) +svuint64_t svadclb_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) +svuint32_t svadclt_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) +svuint64_t svadclt_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) +svuint32_t svadclt_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) +svuint64_t svadclt_u64(svuint64_t, svuint64_t, 
svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) +svuint16_t svaddhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) +svuint32_t svaddhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) +svuint8_t svaddhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) +svint16_t svaddhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) +svint32_t svaddhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) +svint8_t svaddhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) +svuint16_t svaddhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) +svuint32_t svaddhnb_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) +svuint8_t svaddhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) +svint16_t svaddhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) +svint32_t svaddhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) +svint8_t svaddhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) +svuint16_t svaddhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) +svuint32_t svaddhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) +svuint8_t svaddhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) +svint16_t svaddhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) +svint32_t svaddhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) +svint8_t svaddhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) +svuint16_t svaddhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) +svuint32_t svaddhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) +svuint8_t svaddhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) +svint16_t svaddhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) +svint32_t svaddhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) +svint8_t svaddhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) +svint32_t svaddlb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) +svint64_t svaddlb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) +svint16_t svaddlb_n_s16(svint8_t, int8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) +svuint32_t svaddlb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) +svuint64_t svaddlb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) +svuint16_t svaddlb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) +svint32_t svaddlb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) +svint64_t svaddlb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) +svint16_t svaddlb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) +svuint32_t svaddlb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) +svuint64_t svaddlb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) +svuint16_t svaddlb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) +svint32_t svaddlbt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) +svint64_t svaddlbt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) +svint16_t svaddlbt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) +svint32_t svaddlbt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) +svint64_t svaddlbt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) +svint16_t svaddlbt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) +svint32_t svaddlt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) +svint64_t svaddlt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) +svint16_t svaddlt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) +svuint32_t svaddlt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) +svuint64_t svaddlt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) +svuint16_t svaddlt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) +svint32_t svaddlt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) +svint64_t svaddlt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) +svint16_t svaddlt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) +svuint32_t svaddlt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) +svuint64_t svaddlt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) +svuint16_t svaddlt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) +svfloat64_t svaddp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) +svfloat32_t svaddp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) +svfloat16_t svaddp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) +svfloat64_t svaddp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) +svfloat32_t svaddp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) +svfloat16_t svaddp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) +svuint8_t svaddp_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) +svuint32_t svaddp_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) +svuint64_t svaddp_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) +svuint16_t svaddp_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) +svint8_t svaddp_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) +svint32_t svaddp_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) +svint64_t svaddp_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) +svint16_t svaddp_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) +svuint8_t svaddp_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) +svuint32_t svaddp_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) +svuint64_t svaddp_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) +svuint16_t svaddp_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) +svint8_t svaddp_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) +svint32_t svaddp_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) +svint64_t svaddp_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) +svint16_t svaddp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) +svint32_t svaddwb_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) +svint64_t svaddwb_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) +svint16_t svaddwb_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) +svuint32_t svaddwb_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) +svuint64_t svaddwb_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) 
+svuint16_t svaddwb_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) +svint32_t svaddwb_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) +svint64_t svaddwb_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) +svint16_t svaddwb_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) +svuint32_t svaddwb_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) +svuint64_t svaddwb_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) +svuint16_t svaddwb_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) +svint32_t svaddwt_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) +svint64_t svaddwt_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) +svint16_t svaddwt_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) +svuint32_t svaddwt_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) +svuint64_t svaddwt_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) +svuint16_t svaddwt_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) +svint32_t svaddwt_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) +svint64_t svaddwt_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) +svint16_t svaddwt_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) +svuint32_t svaddwt_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) +svuint64_t svaddwt_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) +svuint16_t svaddwt_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) +svuint8_t svbcax_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) +svuint32_t svbcax_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) +svuint64_t svbcax_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) +svuint16_t svbcax_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) +svint8_t svbcax_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) +svint32_t svbcax_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) +svint64_t svbcax_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) +svint16_t svbcax_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) +svuint8_t svbcax_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) 
+svuint32_t svbcax_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) +svuint64_t svbcax_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) +svuint16_t svbcax_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) +svint8_t svbcax_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) +svint32_t svbcax_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) +svint64_t svbcax_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) +svint16_t svbcax_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) +svuint8_t svbsl1n_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) +svuint32_t svbsl1n_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) +svuint64_t svbsl1n_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) +svuint16_t svbsl1n_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) +svint8_t svbsl1n_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) +svint32_t svbsl1n_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) +svint64_t svbsl1n_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) +svint16_t svbsl1n_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) +svuint8_t svbsl1n_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) +svuint32_t svbsl1n_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) +svuint64_t svbsl1n_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) +svuint16_t svbsl1n_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) +svint8_t svbsl1n_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) +svint32_t svbsl1n_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) +svint64_t svbsl1n_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) +svint16_t svbsl1n_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) +svuint8_t svbsl2n_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) +svuint32_t svbsl2n_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) +svuint64_t svbsl2n_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) +svuint16_t svbsl2n_n_u16(svuint16_t, svuint16_t, uint16_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) +svint8_t svbsl2n_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) +svint32_t svbsl2n_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) +svint64_t svbsl2n_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) +svint16_t svbsl2n_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8))) +svuint8_t svbsl2n_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32))) +svuint32_t svbsl2n_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64))) +svuint64_t svbsl2n_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16))) +svuint16_t svbsl2n_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8))) +svint8_t svbsl2n_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32))) +svint32_t svbsl2n_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64))) +svint64_t svbsl2n_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16))) +svint16_t svbsl2n_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8))) +svuint8_t svbsl_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32))) +svuint32_t svbsl_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64))) +svuint64_t svbsl_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16))) +svuint16_t svbsl_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8))) +svint8_t svbsl_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32))) +svint32_t svbsl_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64))) +svint64_t svbsl_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16))) +svint16_t svbsl_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8))) +svuint8_t svbsl_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32))) +svuint32_t svbsl_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64))) +svuint64_t svbsl_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16))) +svuint16_t svbsl_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8))) +svint8_t svbsl_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32))) +svint32_t svbsl_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64))) +svint64_t svbsl_s64(svint64_t, svint64_t, 
svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16))) +svint16_t svbsl_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8))) +svuint8_t svcadd_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32))) +svuint32_t svcadd_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64))) +svuint64_t svcadd_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16))) +svuint16_t svcadd_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8))) +svint8_t svcadd_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32))) +svint32_t svcadd_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64))) +svint64_t svcadd_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16))) +svint16_t svcadd_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32))) +svint32_t svcdot_s32(svint32_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64))) +svint64_t svcdot_s64(svint64_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32))) +svint32_t svcdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64))) +svint64_t svcdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8))) +svuint8_t svcmla_u8(svuint8_t, svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32))) +svuint32_t svcmla_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64))) +svuint64_t svcmla_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16))) +svuint16_t svcmla_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8))) +svint8_t svcmla_s8(svint8_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32))) +svint32_t svcmla_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64))) +svint64_t svcmla_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16))) +svint16_t svcmla_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32))) +svuint32_t svcmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16))) +svuint16_t svcmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32))) +svint32_t svcmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16))) +svint16_t svcmla_lane_s16(svint16_t, svint16_t, svint16_t, 
uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m))) +svfloat32_t svcvtlt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x))) +svfloat32_t svcvtlt_f32_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m))) +svfloat64_t svcvtlt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x))) +svfloat64_t svcvtlt_f64_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m))) +svfloat16_t svcvtnt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m))) +svfloat32_t svcvtnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m))) +svfloat32_t svcvtx_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x))) +svfloat32_t svcvtx_f32_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z))) +svfloat32_t svcvtx_f32_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m))) +svfloat32_t svcvtxnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8))) +svuint8_t sveor3_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32))) +svuint32_t sveor3_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64))) +svuint64_t sveor3_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16))) +svuint16_t sveor3_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8))) +svint8_t sveor3_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32))) +svint32_t sveor3_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64))) +svint64_t sveor3_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16))) +svint16_t sveor3_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8))) +svuint8_t sveor3_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32))) +svuint32_t sveor3_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64))) +svuint64_t sveor3_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16))) +svuint16_t sveor3_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8))) +svint8_t sveor3_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32))) +svint32_t sveor3_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64))) +svint64_t sveor3_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16))) +svint16_t 
sveor3_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8))) +svuint8_t sveorbt_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32))) +svuint32_t sveorbt_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64))) +svuint64_t sveorbt_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16))) +svuint16_t sveorbt_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8))) +svint8_t sveorbt_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32))) +svint32_t sveorbt_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64))) +svint64_t sveorbt_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16))) +svint16_t sveorbt_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8))) +svuint8_t sveorbt_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32))) +svuint32_t sveorbt_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64))) +svuint64_t sveorbt_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16))) +svuint16_t sveorbt_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8))) +svint8_t sveorbt_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32))) +svint32_t sveorbt_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64))) +svint64_t sveorbt_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16))) +svint16_t sveorbt_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8))) +svuint8_t sveortb_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32))) +svuint32_t sveortb_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64))) +svuint64_t sveortb_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16))) +svuint16_t sveortb_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8))) +svint8_t sveortb_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32))) +svint32_t sveortb_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64))) +svint64_t sveortb_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16))) +svint16_t sveortb_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8))) +svuint8_t sveortb_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32))) +svuint32_t sveortb_u32(svuint32_t, svuint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64))) +svuint64_t sveortb_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16))) +svuint16_t sveortb_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8))) +svint8_t sveortb_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32))) +svint32_t sveortb_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64))) +svint64_t sveortb_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16))) +svint16_t sveortb_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m))) +svint8_t svhadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m))) +svint32_t svhadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m))) +svint64_t svhadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m))) +svint16_t svhadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x))) +svint8_t svhadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x))) +svint32_t svhadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x))) +svint64_t svhadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x))) +svint16_t svhadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z))) +svint8_t svhadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z))) +svint32_t svhadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z))) +svint64_t svhadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z))) +svint16_t svhadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m))) +svuint8_t svhadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m))) +svuint32_t svhadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m))) +svuint64_t svhadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m))) +svuint16_t svhadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x))) +svuint8_t svhadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x))) +svuint32_t svhadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x))) +svuint64_t svhadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x))) +svuint16_t svhadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z))) +svuint8_t svhadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z))) +svuint32_t svhadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z))) +svuint64_t svhadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z))) +svuint16_t svhadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m))) +svint8_t svhadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m))) +svint32_t svhadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m))) +svint64_t svhadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m))) +svint16_t svhadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x))) +svint8_t svhadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x))) +svint32_t svhadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x))) +svint64_t svhadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x))) +svint16_t svhadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z))) +svint8_t svhadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z))) +svint32_t svhadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z))) +svint64_t svhadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z))) +svint16_t svhadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m))) +svuint8_t svhadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m))) +svuint32_t svhadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m))) +svuint64_t svhadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m))) +svuint16_t svhadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x))) +svuint8_t svhadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x))) +svuint32_t svhadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x))) +svuint64_t svhadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x))) +svuint16_t svhadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z))) +svuint8_t svhadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z))) +svuint32_t svhadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z))) +svuint64_t svhadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z))) +svuint16_t svhadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m))) +svint8_t svhsub_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m))) +svint32_t svhsub_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m))) +svint64_t svhsub_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m))) +svint16_t svhsub_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x))) +svint8_t svhsub_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x))) +svint32_t svhsub_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x))) +svint64_t svhsub_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x))) +svint16_t svhsub_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z))) +svint8_t svhsub_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z))) +svint32_t svhsub_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z))) +svint64_t svhsub_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z))) +svint16_t svhsub_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m))) +svuint8_t svhsub_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m))) +svuint32_t svhsub_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m))) +svuint64_t svhsub_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m))) +svuint16_t svhsub_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x))) +svuint8_t svhsub_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x))) +svuint32_t svhsub_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x))) +svuint64_t svhsub_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x))) +svuint16_t svhsub_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z))) +svuint8_t svhsub_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z))) +svuint32_t svhsub_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z))) +svuint64_t svhsub_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z))) +svuint16_t svhsub_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m))) +svint8_t svhsub_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m))) +svint32_t svhsub_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m))) +svint64_t svhsub_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m))) +svint16_t svhsub_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x))) +svint8_t svhsub_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x))) +svint32_t svhsub_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x))) +svint64_t svhsub_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x))) +svint16_t svhsub_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z))) +svint8_t svhsub_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z))) +svint32_t svhsub_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z))) +svint64_t svhsub_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z))) +svint16_t svhsub_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m))) +svuint8_t svhsub_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m))) +svuint32_t svhsub_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m))) +svuint64_t svhsub_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m))) +svuint16_t svhsub_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x))) +svuint8_t svhsub_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x))) +svuint32_t svhsub_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x))) +svuint64_t svhsub_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x))) +svuint16_t svhsub_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z))) +svuint8_t svhsub_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z))) +svuint32_t svhsub_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z))) +svuint64_t svhsub_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z))) +svuint16_t svhsub_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m))) +svint8_t svhsubr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m))) +svint32_t svhsubr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m))) +svint64_t svhsubr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m))) +svint16_t svhsubr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x))) +svint8_t svhsubr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x))) +svint32_t svhsubr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x))) +svint64_t svhsubr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x))) +svint16_t svhsubr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z))) +svint8_t svhsubr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z))) +svint32_t svhsubr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z))) +svint64_t svhsubr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z))) +svint16_t svhsubr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m))) +svuint8_t svhsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m))) +svuint32_t svhsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m))) +svuint64_t svhsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m))) +svuint16_t svhsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x))) +svuint8_t svhsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x))) +svuint32_t svhsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x))) +svuint64_t svhsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x))) +svuint16_t svhsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z))) +svuint8_t svhsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z))) +svuint32_t svhsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z))) +svuint64_t svhsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z))) +svuint16_t svhsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m))) +svint8_t svhsubr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m))) +svint32_t svhsubr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m))) +svint64_t svhsubr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m))) +svint16_t 
svhsubr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x))) +svint8_t svhsubr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x))) +svint32_t svhsubr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x))) +svint64_t svhsubr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x))) +svint16_t svhsubr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z))) +svint8_t svhsubr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z))) +svint32_t svhsubr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z))) +svint64_t svhsubr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z))) +svint16_t svhsubr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m))) +svuint8_t svhsubr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m))) +svuint32_t svhsubr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m))) +svuint64_t svhsubr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m))) +svuint16_t svhsubr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x))) +svuint8_t svhsubr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x))) +svuint32_t svhsubr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x))) +svuint64_t svhsubr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x))) +svuint16_t svhsubr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z))) +svuint8_t svhsubr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z))) +svuint32_t svhsubr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z))) +svuint64_t svhsubr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z))) +svuint16_t svhsubr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m))) +svint64_t svlogb_f64_m(svint64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m))) +svint32_t svlogb_f32_m(svint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m))) +svint16_t svlogb_f16_m(svint16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x))) +svint64_t svlogb_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x))) +svint32_t svlogb_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x))) +svint16_t svlogb_f16_x(svbool_t, 
svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z))) +svint64_t svlogb_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z))) +svint32_t svlogb_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z))) +svint16_t svlogb_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m))) +svfloat64_t svmaxnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m))) +svfloat32_t svmaxnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m))) +svfloat16_t svmaxnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x))) +svfloat64_t svmaxnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x))) +svfloat32_t svmaxnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x))) +svfloat16_t svmaxnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m))) +svfloat64_t svmaxp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m))) +svfloat32_t svmaxp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m))) +svfloat16_t svmaxp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x))) +svfloat64_t svmaxp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x))) +svfloat32_t svmaxp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x))) +svfloat16_t svmaxp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m))) +svint8_t svmaxp_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m))) +svint32_t svmaxp_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m))) +svint64_t svmaxp_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m))) +svint16_t svmaxp_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x))) +svint8_t svmaxp_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x))) +svint32_t svmaxp_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x))) +svint64_t svmaxp_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x))) +svint16_t svmaxp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m))) +svuint8_t svmaxp_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m))) +svuint32_t svmaxp_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m))) +svuint64_t svmaxp_u64_m(svbool_t, svuint64_t, 
svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m))) +svuint16_t svmaxp_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x))) +svuint8_t svmaxp_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x))) +svuint32_t svmaxp_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x))) +svuint64_t svmaxp_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x))) +svuint16_t svmaxp_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m))) +svfloat64_t svminnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m))) +svfloat32_t svminnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m))) +svfloat16_t svminnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x))) +svfloat64_t svminnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x))) +svfloat32_t svminnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x))) +svfloat16_t svminnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m))) +svfloat64_t svminp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m))) +svfloat32_t svminp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m))) +svfloat16_t svminp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x))) +svfloat64_t svminp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x))) +svfloat32_t svminp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x))) +svfloat16_t svminp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m))) +svint8_t svminp_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m))) +svint32_t svminp_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m))) +svint64_t svminp_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m))) +svint16_t svminp_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x))) +svint8_t svminp_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x))) +svint32_t svminp_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x))) +svint64_t svminp_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x))) +svint16_t svminp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m))) +svuint8_t 
svminp_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m))) +svuint32_t svminp_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m))) +svuint64_t svminp_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m))) +svuint16_t svminp_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x))) +svuint8_t svminp_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x))) +svuint32_t svminp_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x))) +svuint64_t svminp_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x))) +svuint16_t svminp_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32))) +svuint32_t svmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64))) +svuint64_t svmla_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16))) +svuint16_t svmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32))) +svint32_t svmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64))) +svint64_t svmla_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16))) +svint16_t svmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32))) +svfloat32_t svmlalb_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32))) +svint32_t svmlalb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64))) +svint64_t svmlalb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16))) +svint16_t svmlalb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32))) +svuint32_t svmlalb_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64))) +svuint64_t svmlalb_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16))) +svuint16_t svmlalb_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32))) +svfloat32_t svmlalb_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32))) +svint32_t svmlalb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64))) +svint64_t svmlalb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16))) +svint16_t svmlalb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32))) +svuint32_t svmlalb_u32(svuint32_t, svuint16_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64))) +svuint64_t svmlalb_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16))) +svuint16_t svmlalb_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32))) +svfloat32_t svmlalb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32))) +svint32_t svmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64))) +svint64_t svmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32))) +svuint32_t svmlalb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64))) +svuint64_t svmlalb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32))) +svfloat32_t svmlalt_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32))) +svint32_t svmlalt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64))) +svint64_t svmlalt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16))) +svint16_t svmlalt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32))) +svuint32_t svmlalt_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64))) +svuint64_t svmlalt_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16))) +svuint16_t svmlalt_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32))) +svfloat32_t svmlalt_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32))) +svint32_t svmlalt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64))) +svint64_t svmlalt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16))) +svint16_t svmlalt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32))) +svuint32_t svmlalt_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64))) +svuint64_t svmlalt_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16))) +svuint16_t svmlalt_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32))) +svfloat32_t svmlalt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32))) +svint32_t svmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64))) +svint64_t svmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32))) +svuint32_t svmlalt_lane_u32(svuint32_t, 
svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64))) +svuint64_t svmlalt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32))) +svuint32_t svmls_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64))) +svuint64_t svmls_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16))) +svuint16_t svmls_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32))) +svint32_t svmls_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64))) +svint64_t svmls_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16))) +svint16_t svmls_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32))) +svfloat32_t svmlslb_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32))) +svint32_t svmlslb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64))) +svint64_t svmlslb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16))) +svint16_t svmlslb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32))) +svuint32_t svmlslb_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64))) +svuint64_t svmlslb_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16))) +svuint16_t svmlslb_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32))) +svfloat32_t svmlslb_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32))) +svint32_t svmlslb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64))) +svint64_t svmlslb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16))) +svint16_t svmlslb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32))) +svuint32_t svmlslb_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64))) +svuint64_t svmlslb_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16))) +svuint16_t svmlslb_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32))) +svfloat32_t svmlslb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32))) +svint32_t svmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64))) +svint64_t svmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32))) 
+svuint32_t svmlslb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64))) +svuint64_t svmlslb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32))) +svfloat32_t svmlslt_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32))) +svint32_t svmlslt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64))) +svint64_t svmlslt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16))) +svint16_t svmlslt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32))) +svuint32_t svmlslt_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64))) +svuint64_t svmlslt_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16))) +svuint16_t svmlslt_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32))) +svfloat32_t svmlslt_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32))) +svint32_t svmlslt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64))) +svint64_t svmlslt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16))) +svint16_t svmlslt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32))) +svuint32_t svmlslt_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64))) +svuint64_t svmlslt_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16))) +svuint16_t svmlslt_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32))) +svfloat32_t svmlslt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32))) +svint32_t svmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64))) +svint64_t svmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32))) +svuint32_t svmlslt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64))) +svuint64_t svmlslt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32))) +svint32_t svmovlb_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64))) +svint64_t svmovlb_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16))) +svint16_t svmovlb_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32))) +svuint32_t svmovlb_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64))) +svuint64_t svmovlb_u64(svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16))) +svuint16_t svmovlb_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32))) +svint32_t svmovlt_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64))) +svint64_t svmovlt_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16))) +svint16_t svmovlt_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32))) +svuint32_t svmovlt_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64))) +svuint64_t svmovlt_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16))) +svuint16_t svmovlt_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32))) +svuint32_t svmul_lane_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64))) +svuint64_t svmul_lane_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16))) +svuint16_t svmul_lane_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32))) +svint32_t svmul_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64))) +svint64_t svmul_lane_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16))) +svint16_t svmul_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32))) +svint32_t svmullb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64))) +svint64_t svmullb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16))) +svint16_t svmullb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32))) +svuint32_t svmullb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64))) +svuint64_t svmullb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16))) +svuint16_t svmullb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32))) +svint32_t svmullb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64))) +svint64_t svmullb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16))) +svint16_t svmullb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32))) +svuint32_t svmullb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64))) +svuint64_t svmullb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16))) +svuint16_t svmullb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32))) +svint32_t svmullb_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64))) +svint64_t svmullb_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32))) +svuint32_t svmullb_lane_u32(svuint16_t, svuint16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64))) +svuint64_t svmullb_lane_u64(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32))) +svint32_t svmullt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64))) +svint64_t svmullt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16))) +svint16_t svmullt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32))) +svuint32_t svmullt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64))) +svuint64_t svmullt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16))) +svuint16_t svmullt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32))) +svint32_t svmullt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64))) +svint64_t svmullt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16))) +svint16_t svmullt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32))) +svuint32_t svmullt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64))) +svuint64_t svmullt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16))) +svuint16_t svmullt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32))) +svint32_t svmullt_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64))) +svint64_t svmullt_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32))) +svuint32_t svmullt_lane_u32(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64))) +svuint64_t svmullt_lane_u64(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8))) +svuint8_t svnbsl_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32))) +svuint32_t svnbsl_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64))) +svuint64_t svnbsl_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16))) +svuint16_t svnbsl_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8))) +svint8_t svnbsl_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32))) +svint32_t svnbsl_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64))) +svint64_t svnbsl_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16))) +svint16_t svnbsl_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8))) +svuint8_t svnbsl_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32))) +svuint32_t svnbsl_u32(svuint32_t, svuint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64))) +svuint64_t svnbsl_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16))) +svuint16_t svnbsl_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8))) +svint8_t svnbsl_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32))) +svint32_t svnbsl_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64))) +svint64_t svnbsl_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16))) +svint16_t svnbsl_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8))) +svuint8_t svpmul_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8))) +svuint8_t svpmul_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64))) +svuint64_t svpmullb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16))) +svuint16_t svpmullb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64))) +svuint64_t svpmullb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16))) +svuint16_t svpmullb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8))) +svuint8_t svpmullb_pair_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32))) +svuint32_t svpmullb_pair_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8))) +svuint8_t svpmullb_pair_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32))) +svuint32_t svpmullb_pair_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64))) +svuint64_t svpmullt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16))) +svuint16_t svpmullt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64))) +svuint64_t svpmullt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16))) +svuint16_t svpmullt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8))) +svuint8_t svpmullt_pair_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32))) +svuint32_t svpmullt_pair_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8))) +svuint8_t svpmullt_pair_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32))) +svuint32_t svpmullt_pair_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m))) +svint8_t svqabs_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m))) +svint32_t svqabs_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m))) +svint64_t svqabs_s64_m(svint64_t, svbool_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m))) +svint16_t svqabs_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x))) +svint8_t svqabs_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x))) +svint32_t svqabs_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x))) +svint64_t svqabs_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x))) +svint16_t svqabs_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z))) +svint8_t svqabs_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z))) +svint32_t svqabs_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z))) +svint64_t svqabs_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z))) +svint16_t svqabs_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m))) +svint8_t svqadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m))) +svint32_t svqadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m))) +svint64_t svqadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m))) +svint16_t svqadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x))) +svint8_t svqadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x))) +svint32_t svqadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x))) +svint64_t svqadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x))) +svint16_t svqadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z))) +svint8_t svqadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z))) +svint32_t svqadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z))) +svint64_t svqadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z))) +svint16_t svqadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m))) +svuint8_t svqadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m))) +svuint32_t svqadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m))) +svuint64_t svqadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m))) +svuint16_t svqadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x))) +svuint8_t svqadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x))) +svuint32_t svqadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x))) +svuint64_t svqadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x))) +svuint16_t svqadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z))) +svuint8_t svqadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z))) +svuint32_t svqadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z))) +svuint64_t svqadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z))) +svuint16_t svqadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m))) +svint8_t svqadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m))) +svint32_t svqadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m))) +svint64_t svqadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m))) +svint16_t svqadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x))) +svint8_t svqadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x))) +svint32_t svqadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x))) +svint64_t svqadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x))) +svint16_t svqadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z))) +svint8_t svqadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z))) +svint32_t svqadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z))) +svint64_t svqadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z))) +svint16_t svqadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m))) +svuint8_t svqadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m))) +svuint32_t svqadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m))) +svuint64_t svqadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m))) +svuint16_t svqadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x))) +svuint8_t svqadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x))) +svuint32_t svqadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x))) +svuint64_t svqadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x))) +svuint16_t svqadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z))) +svuint8_t svqadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z))) +svuint32_t svqadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z))) +svuint64_t svqadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z))) +svuint16_t svqadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8))) +svint8_t svqcadd_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32))) +svint32_t svqcadd_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64))) +svint64_t svqcadd_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16))) +svint16_t svqcadd_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32))) +svint32_t svqdmlalb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64))) +svint64_t svqdmlalb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16))) +svint16_t svqdmlalb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32))) +svint32_t svqdmlalb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64))) +svint64_t svqdmlalb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16))) +svint16_t svqdmlalb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32))) +svint32_t svqdmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64))) +svint64_t svqdmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32))) +svint32_t svqdmlalbt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64))) +svint64_t svqdmlalbt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) +svint16_t svqdmlalbt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) +svint32_t svqdmlalbt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) +svint64_t svqdmlalbt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) +svint16_t svqdmlalbt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) +svint32_t svqdmlalt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) +svint64_t svqdmlalt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) +svint16_t svqdmlalt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) +svint32_t 
svqdmlalt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) +svint64_t svqdmlalt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) +svint16_t svqdmlalt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) +svint32_t svqdmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) +svint64_t svqdmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) +svint32_t svqdmlslb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) +svint64_t svqdmlslb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) +svint16_t svqdmlslb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) +svint32_t svqdmlslb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) +svint64_t svqdmlslb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) +svint16_t svqdmlslb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) +svint32_t svqdmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) +svint64_t svqdmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) +svint32_t svqdmlslbt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) +svint64_t svqdmlslbt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) +svint16_t svqdmlslbt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) +svint32_t svqdmlslbt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) +svint64_t svqdmlslbt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) +svint16_t svqdmlslbt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) +svint32_t svqdmlslt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) +svint64_t svqdmlslt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) +svint16_t svqdmlslt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) +svint32_t svqdmlslt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) +svint64_t svqdmlslt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) +svint16_t svqdmlslt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) +svint32_t 
svqdmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) +svint64_t svqdmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) +svint8_t svqdmulh_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) +svint32_t svqdmulh_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) +svint64_t svqdmulh_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) +svint16_t svqdmulh_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) +svint8_t svqdmulh_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) +svint32_t svqdmulh_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) +svint64_t svqdmulh_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) +svint16_t svqdmulh_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) +svint32_t svqdmulh_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) +svint64_t svqdmulh_lane_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) +svint16_t svqdmulh_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) +svint32_t svqdmullb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) +svint64_t svqdmullb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) +svint16_t svqdmullb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) +svint32_t svqdmullb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) +svint64_t svqdmullb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) +svint16_t svqdmullb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) +svint32_t svqdmullb_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) +svint64_t svqdmullb_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) +svint32_t svqdmullt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) +svint64_t svqdmullt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) +svint16_t svqdmullt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) +svint32_t svqdmullt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) +svint64_t svqdmullt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) +svint16_t svqdmullt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) +svint32_t 
svqdmullt_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) +svint64_t svqdmullt_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) +svint8_t svqneg_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) +svint32_t svqneg_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) +svint64_t svqneg_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) +svint16_t svqneg_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) +svint8_t svqneg_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) +svint32_t svqneg_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) +svint64_t svqneg_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) +svint16_t svqneg_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) +svint8_t svqneg_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) +svint32_t svqneg_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) +svint64_t svqneg_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) +svint16_t svqneg_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) +svint8_t svqrdcmlah_s8(svint8_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) +svint32_t svqrdcmlah_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) +svint64_t svqrdcmlah_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) +svint16_t svqrdcmlah_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) +svint32_t svqrdcmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) +svint16_t svqrdcmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) +svint8_t svqrdmlah_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) +svint32_t svqrdmlah_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) +svint64_t svqrdmlah_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) +svint16_t svqrdmlah_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) +svint8_t svqrdmlah_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) +svint32_t svqrdmlah_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) +svint64_t 
svqrdmlah_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) +svint16_t svqrdmlah_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) +svint32_t svqrdmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) +svint64_t svqrdmlah_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) +svint16_t svqrdmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) +svint8_t svqrdmlsh_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) +svint32_t svqrdmlsh_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) +svint64_t svqrdmlsh_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) +svint16_t svqrdmlsh_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) +svint8_t svqrdmlsh_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) +svint32_t svqrdmlsh_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) +svint64_t svqrdmlsh_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) +svint16_t svqrdmlsh_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) +svint32_t svqrdmlsh_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) +svint64_t svqrdmlsh_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) +svint16_t svqrdmlsh_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) +svint8_t svqrdmulh_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) +svint32_t svqrdmulh_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) +svint64_t svqrdmulh_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) +svint16_t svqrdmulh_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) +svint8_t svqrdmulh_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) +svint32_t svqrdmulh_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) +svint64_t svqrdmulh_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) +svint16_t svqrdmulh_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) +svint32_t svqrdmulh_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) +svint64_t svqrdmulh_lane_s64(svint64_t, svint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) +svint16_t svqrdmulh_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) +svint8_t svqrshl_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) +svint32_t svqrshl_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) +svint64_t svqrshl_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) +svint16_t svqrshl_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) +svint8_t svqrshl_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) +svint32_t svqrshl_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) +svint64_t svqrshl_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) +svint16_t svqrshl_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) +svint8_t svqrshl_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) +svint32_t svqrshl_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) +svint64_t svqrshl_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) +svint16_t svqrshl_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) +svuint8_t svqrshl_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) +svuint32_t svqrshl_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) +svuint64_t svqrshl_n_u64_m(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) +svuint16_t svqrshl_n_u16_m(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) +svuint8_t svqrshl_n_u8_x(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) +svuint32_t svqrshl_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) +svuint64_t svqrshl_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) +svuint16_t svqrshl_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) +svuint8_t svqrshl_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) +svuint32_t svqrshl_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) +svuint64_t svqrshl_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) +svuint16_t svqrshl_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) +svint8_t 
svqrshl_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) +svint32_t svqrshl_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) +svint64_t svqrshl_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) +svint16_t svqrshl_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) +svint8_t svqrshl_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) +svint32_t svqrshl_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) +svint64_t svqrshl_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) +svint16_t svqrshl_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) +svint8_t svqrshl_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) +svint32_t svqrshl_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) +svint64_t svqrshl_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) +svint16_t svqrshl_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) +svuint8_t svqrshl_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) +svuint32_t svqrshl_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) +svuint64_t svqrshl_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) +svuint16_t svqrshl_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) +svuint8_t svqrshl_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) +svuint32_t svqrshl_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) +svuint64_t svqrshl_u64_x(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x))) +svuint16_t svqrshl_u16_x(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z))) +svuint8_t svqrshl_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z))) +svuint32_t svqrshl_u32_z(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z))) +svuint64_t svqrshl_u64_z(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z))) +svuint16_t svqrshl_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32))) +svint16_t svqrshrnb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64))) +svint32_t svqrshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16))) +svint8_t svqrshrnb_n_s16(svint16_t, 
uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32))) +svuint16_t svqrshrnb_n_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64))) +svuint32_t svqrshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16))) +svuint8_t svqrshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32))) +svint16_t svqrshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64))) +svint32_t svqrshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16))) +svint8_t svqrshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32))) +svuint16_t svqrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64))) +svuint32_t svqrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16))) +svuint8_t svqrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32))) +svuint16_t svqrshrunb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64))) +svuint32_t svqrshrunb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16))) +svuint8_t svqrshrunb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32))) +svuint16_t svqrshrunt_n_s32(svuint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64))) +svuint32_t svqrshrunt_n_s64(svuint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16))) +svuint8_t svqrshrunt_n_s16(svuint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m))) +svint8_t svqshl_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m))) +svint32_t svqshl_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m))) +svint64_t svqshl_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m))) +svint16_t svqshl_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x))) +svint8_t svqshl_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x))) +svint32_t svqshl_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x))) +svint64_t svqshl_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x))) +svint16_t svqshl_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z))) +svint8_t svqshl_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z))) +svint32_t svqshl_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z))) +svint64_t svqshl_n_s64_z(svbool_t, svint64_t, int64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z))) +svint16_t svqshl_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m))) +svuint8_t svqshl_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m))) +svuint32_t svqshl_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m))) +svuint64_t svqshl_n_u64_m(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m))) +svuint16_t svqshl_n_u16_m(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x))) +svuint8_t svqshl_n_u8_x(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x))) +svuint32_t svqshl_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x))) +svuint64_t svqshl_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x))) +svuint16_t svqshl_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z))) +svuint8_t svqshl_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z))) +svuint32_t svqshl_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z))) +svuint64_t svqshl_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z))) +svuint16_t svqshl_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m))) +svint8_t svqshl_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m))) +svint32_t svqshl_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m))) +svint64_t svqshl_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m))) +svint16_t svqshl_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x))) +svint8_t svqshl_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x))) +svint32_t svqshl_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x))) +svint64_t svqshl_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x))) +svint16_t svqshl_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z))) +svint8_t svqshl_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z))) +svint32_t svqshl_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z))) +svint64_t svqshl_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z))) +svint16_t svqshl_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m))) +svuint8_t svqshl_u8_m(svbool_t, svuint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m))) +svuint32_t svqshl_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m))) +svuint64_t svqshl_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m))) +svuint16_t svqshl_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x))) +svuint8_t svqshl_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x))) +svuint32_t svqshl_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x))) +svuint64_t svqshl_u64_x(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x))) +svuint16_t svqshl_u16_x(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z))) +svuint8_t svqshl_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z))) +svuint32_t svqshl_u32_z(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z))) +svuint64_t svqshl_u64_z(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z))) +svuint16_t svqshl_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m))) +svuint8_t svqshlu_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m))) +svuint32_t svqshlu_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m))) +svuint64_t svqshlu_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m))) +svuint16_t svqshlu_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x))) +svuint8_t svqshlu_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x))) +svuint32_t svqshlu_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x))) +svuint64_t svqshlu_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x))) +svuint16_t svqshlu_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z))) +svuint8_t svqshlu_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z))) +svuint32_t svqshlu_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z))) +svuint64_t svqshlu_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z))) +svuint16_t svqshlu_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32))) +svint16_t svqshrnb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64))) +svint32_t svqshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16))) +svint8_t svqshrnb_n_s16(svint16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32))) +svuint16_t svqshrnb_n_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64))) +svuint32_t svqshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16))) +svuint8_t svqshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32))) +svint16_t svqshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64))) +svint32_t svqshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16))) +svint8_t svqshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32))) +svuint16_t svqshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64))) +svuint32_t svqshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16))) +svuint8_t svqshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32))) +svuint16_t svqshrunb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64))) +svuint32_t svqshrunb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16))) +svuint8_t svqshrunb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32))) +svuint16_t svqshrunt_n_s32(svuint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64))) +svuint32_t svqshrunt_n_s64(svuint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16))) +svuint8_t svqshrunt_n_s16(svuint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m))) +svint8_t svqsub_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m))) +svint32_t svqsub_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m))) +svint64_t svqsub_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m))) +svint16_t svqsub_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x))) +svint8_t svqsub_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x))) +svint32_t svqsub_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x))) +svint64_t svqsub_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x))) +svint16_t svqsub_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z))) +svint8_t svqsub_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z))) +svint32_t svqsub_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z))) +svint64_t svqsub_n_s64_z(svbool_t, svint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z))) +svint16_t svqsub_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m))) +svuint8_t svqsub_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m))) +svuint32_t svqsub_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m))) +svuint64_t svqsub_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m))) +svuint16_t svqsub_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x))) +svuint8_t svqsub_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x))) +svuint32_t svqsub_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x))) +svuint64_t svqsub_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x))) +svuint16_t svqsub_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z))) +svuint8_t svqsub_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z))) +svuint32_t svqsub_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z))) +svuint64_t svqsub_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z))) +svuint16_t svqsub_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m))) +svint8_t svqsub_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m))) +svint32_t svqsub_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m))) +svint64_t svqsub_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m))) +svint16_t svqsub_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x))) +svint8_t svqsub_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x))) +svint32_t svqsub_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x))) +svint64_t svqsub_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x))) +svint16_t svqsub_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z))) +svint8_t svqsub_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z))) +svint32_t svqsub_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z))) +svint64_t svqsub_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z))) +svint16_t svqsub_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m))) +svuint8_t svqsub_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m))) +svuint32_t svqsub_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m))) +svuint64_t svqsub_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m))) +svuint16_t svqsub_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x))) +svuint8_t svqsub_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x))) +svuint32_t svqsub_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x))) +svuint64_t svqsub_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x))) +svuint16_t svqsub_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z))) +svuint8_t svqsub_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z))) +svuint32_t svqsub_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z))) +svuint64_t svqsub_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z))) +svuint16_t svqsub_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m))) +svint8_t svqsubr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m))) +svint32_t svqsubr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m))) +svint64_t svqsubr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m))) +svint16_t svqsubr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x))) +svint8_t svqsubr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x))) +svint32_t svqsubr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x))) +svint64_t svqsubr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x))) +svint16_t svqsubr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z))) +svint8_t svqsubr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z))) +svint32_t svqsubr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z))) +svint64_t svqsubr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z))) +svint16_t svqsubr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m))) +svuint8_t svqsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m))) +svuint32_t svqsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m))) +svuint64_t svqsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m))) +svuint16_t svqsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x))) +svuint8_t svqsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x))) +svuint32_t svqsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x))) +svuint64_t svqsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x))) +svuint16_t svqsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z))) +svuint8_t svqsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z))) +svuint32_t svqsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z))) +svuint64_t svqsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z))) +svuint16_t svqsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m))) +svint8_t svqsubr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m))) +svint32_t svqsubr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m))) +svint64_t svqsubr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m))) +svint16_t svqsubr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x))) +svint8_t svqsubr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x))) +svint32_t svqsubr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x))) +svint64_t svqsubr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x))) +svint16_t svqsubr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z))) +svint8_t svqsubr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z))) +svint32_t svqsubr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z))) +svint64_t svqsubr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z))) +svint16_t svqsubr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m))) +svuint8_t svqsubr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m))) +svuint32_t svqsubr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m))) +svuint64_t svqsubr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m))) +svuint16_t svqsubr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x))) +svuint8_t svqsubr_u8_x(svbool_t, svuint8_t, 
svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x))) +svuint32_t svqsubr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x))) +svuint64_t svqsubr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x))) +svuint16_t svqsubr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z))) +svuint8_t svqsubr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z))) +svuint32_t svqsubr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z))) +svuint64_t svqsubr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z))) +svuint16_t svqsubr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32))) +svint16_t svqxtnb_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64))) +svint32_t svqxtnb_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16))) +svint8_t svqxtnb_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32))) +svuint16_t svqxtnb_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64))) +svuint32_t svqxtnb_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16))) +svuint8_t svqxtnb_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32))) +svint16_t svqxtnt_s32(svint16_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64))) +svint32_t svqxtnt_s64(svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16))) +svint8_t svqxtnt_s16(svint8_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32))) +svuint16_t svqxtnt_u32(svuint16_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64))) +svuint32_t svqxtnt_u64(svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16))) +svuint8_t svqxtnt_u16(svuint8_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32))) +svuint16_t svqxtunb_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64))) +svuint32_t svqxtunb_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16))) +svuint8_t svqxtunb_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32))) +svuint16_t svqxtunt_s32(svuint16_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64))) +svuint32_t svqxtunt_s64(svuint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16))) +svuint8_t svqxtunt_s16(svuint8_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32))) +svuint16_t svraddhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64))) +svuint32_t svraddhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16))) +svuint8_t svraddhnb_n_u16(svuint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32))) +svint16_t svraddhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64))) +svint32_t svraddhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16))) +svint8_t svraddhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32))) +svuint16_t svraddhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64))) +svuint32_t svraddhnb_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16))) +svuint8_t svraddhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32))) +svint16_t svraddhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64))) +svint32_t svraddhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16))) +svint8_t svraddhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32))) +svuint16_t svraddhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64))) +svuint32_t svraddhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16))) +svuint8_t svraddhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32))) +svint16_t svraddhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64))) +svint32_t svraddhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16))) +svint8_t svraddhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32))) +svuint16_t svraddhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64))) +svuint32_t svraddhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16))) +svuint8_t svraddhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32))) +svint16_t svraddhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64))) +svint32_t svraddhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16))) +svint8_t svraddhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m))) +svuint32_t svrecpe_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x))) +svuint32_t svrecpe_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z))) +svuint32_t svrecpe_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m))) +svint8_t svrhadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m))) +svint32_t svrhadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m))) +svint64_t svrhadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m))) +svint16_t svrhadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x))) +svint8_t svrhadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x))) +svint32_t svrhadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x))) +svint64_t svrhadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x))) +svint16_t svrhadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z))) +svint8_t svrhadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z))) +svint32_t svrhadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z))) +svint64_t svrhadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z))) +svint16_t svrhadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m))) +svuint8_t svrhadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m))) +svuint32_t svrhadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m))) +svuint64_t svrhadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m))) +svuint16_t svrhadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x))) +svuint8_t svrhadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x))) +svuint32_t svrhadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x))) +svuint64_t svrhadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x))) +svuint16_t svrhadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z))) +svuint8_t svrhadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z))) +svuint32_t svrhadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z))) +svuint64_t svrhadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z))) +svuint16_t svrhadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m))) +svint8_t svrhadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m))) +svint32_t svrhadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m))) +svint64_t svrhadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m))) +svint16_t 
svrhadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x))) +svint8_t svrhadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x))) +svint32_t svrhadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x))) +svint64_t svrhadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x))) +svint16_t svrhadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z))) +svint8_t svrhadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z))) +svint32_t svrhadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z))) +svint64_t svrhadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z))) +svint16_t svrhadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m))) +svuint8_t svrhadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m))) +svuint32_t svrhadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m))) +svuint64_t svrhadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m))) +svuint16_t svrhadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x))) +svuint8_t svrhadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x))) +svuint32_t svrhadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x))) +svuint64_t svrhadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x))) +svuint16_t svrhadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z))) +svuint8_t svrhadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z))) +svuint32_t svrhadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z))) +svuint64_t svrhadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z))) +svuint16_t svrhadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m))) +svint8_t svrshl_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m))) +svint32_t svrshl_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m))) +svint64_t svrshl_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m))) +svint16_t svrshl_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x))) +svint8_t svrshl_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x))) +svint32_t 
svrshl_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x))) +svint64_t svrshl_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x))) +svint16_t svrshl_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z))) +svint8_t svrshl_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z))) +svint32_t svrshl_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z))) +svint64_t svrshl_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z))) +svint16_t svrshl_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m))) +svuint8_t svrshl_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m))) +svuint32_t svrshl_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m))) +svuint64_t svrshl_n_u64_m(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m))) +svuint16_t svrshl_n_u16_m(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x))) +svuint8_t svrshl_n_u8_x(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x))) +svuint32_t svrshl_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x))) +svuint64_t svrshl_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x))) +svuint16_t svrshl_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z))) +svuint8_t svrshl_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z))) +svuint32_t svrshl_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z))) +svuint64_t svrshl_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z))) +svuint16_t svrshl_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m))) +svint8_t svrshl_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m))) +svint32_t svrshl_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m))) +svint64_t svrshl_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m))) +svint16_t svrshl_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x))) +svint8_t svrshl_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x))) +svint32_t svrshl_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x))) +svint64_t svrshl_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x))) +svint16_t svrshl_s16_x(svbool_t, svint16_t, 
svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z))) +svint8_t svrshl_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z))) +svint32_t svrshl_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z))) +svint64_t svrshl_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z))) +svint16_t svrshl_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m))) +svuint8_t svrshl_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m))) +svuint32_t svrshl_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m))) +svuint64_t svrshl_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m))) +svuint16_t svrshl_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x))) +svuint8_t svrshl_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x))) +svuint32_t svrshl_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x))) +svuint64_t svrshl_u64_x(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x))) +svuint16_t svrshl_u16_x(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z))) +svuint8_t svrshl_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z))) +svuint32_t svrshl_u32_z(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z))) +svuint64_t svrshl_u64_z(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z))) +svuint16_t svrshl_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m))) +svint8_t svrshr_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m))) +svint32_t svrshr_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m))) +svint64_t svrshr_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m))) +svint16_t svrshr_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m))) +svuint8_t svrshr_n_u8_m(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m))) +svuint32_t svrshr_n_u32_m(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m))) +svuint64_t svrshr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m))) +svuint16_t svrshr_n_u16_m(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x))) +svint8_t svrshr_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x))) +svint32_t svrshr_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x))) +svint64_t svrshr_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x))) +svint16_t svrshr_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x))) +svuint8_t svrshr_n_u8_x(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x))) +svuint32_t svrshr_n_u32_x(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x))) +svuint64_t svrshr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x))) +svuint16_t svrshr_n_u16_x(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z))) +svint8_t svrshr_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z))) +svint32_t svrshr_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z))) +svint64_t svrshr_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z))) +svint16_t svrshr_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z))) +svuint8_t svrshr_n_u8_z(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z))) +svuint32_t svrshr_n_u32_z(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z))) +svuint64_t svrshr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z))) +svuint16_t svrshr_n_u16_z(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32))) +svuint16_t svrshrnb_n_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64))) +svuint32_t svrshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16))) +svuint8_t svrshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32))) +svint16_t svrshrnb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64))) +svint32_t svrshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16))) +svint8_t svrshrnb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32))) +svuint16_t svrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64))) +svuint32_t svrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16))) +svuint8_t svrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32))) +svint16_t svrshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64))) +svint32_t svrshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16))) +svint8_t svrshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m))) +svuint32_t svrsqrte_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x))) +svuint32_t svrsqrte_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z))) +svuint32_t svrsqrte_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8))) +svint8_t svrsra_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32))) +svint32_t svrsra_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64))) +svint64_t svrsra_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16))) +svint16_t svrsra_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8))) +svuint8_t svrsra_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32))) +svuint32_t svrsra_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64))) +svuint64_t svrsra_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16))) +svuint16_t svrsra_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32))) +svuint16_t svrsubhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64))) +svuint32_t svrsubhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16))) +svuint8_t svrsubhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32))) +svint16_t svrsubhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64))) +svint32_t svrsubhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16))) +svint8_t svrsubhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32))) +svuint16_t svrsubhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64))) +svuint32_t svrsubhnb_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16))) +svuint8_t svrsubhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32))) +svint16_t svrsubhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64))) +svint32_t svrsubhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16))) +svint8_t svrsubhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32))) +svuint16_t svrsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64))) +svuint32_t svrsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16))) +svuint8_t svrsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32))) +svint16_t svrsubhnt_n_s32(svint16_t, 
svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64))) +svint32_t svrsubhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16))) +svint8_t svrsubhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32))) +svuint16_t svrsubhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64))) +svuint32_t svrsubhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16))) +svuint8_t svrsubhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32))) +svint16_t svrsubhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64))) +svint32_t svrsubhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16))) +svint8_t svrsubhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32))) +svuint32_t svsbclb_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64))) +svuint64_t svsbclb_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32))) +svuint32_t svsbclb_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64))) +svuint64_t svsbclb_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32))) +svuint32_t svsbclt_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64))) +svuint64_t svsbclt_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32))) +svuint32_t svsbclt_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64))) +svuint64_t svsbclt_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32))) +svint32_t svshllb_n_s32(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64))) +svint64_t svshllb_n_s64(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16))) +svint16_t svshllb_n_s16(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32))) +svuint32_t svshllb_n_u32(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64))) +svuint64_t svshllb_n_u64(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16))) +svuint16_t svshllb_n_u16(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32))) +svint32_t svshllt_n_s32(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64))) +svint64_t svshllt_n_s64(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16))) +svint16_t svshllt_n_s16(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32))) +svuint32_t svshllt_n_u32(svuint16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64))) +svuint64_t svshllt_n_u64(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16))) +svuint16_t svshllt_n_u16(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32))) +svuint16_t svshrnb_n_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64))) +svuint32_t svshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16))) +svuint8_t svshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32))) +svint16_t svshrnb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64))) +svint32_t svshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16))) +svint8_t svshrnb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32))) +svuint16_t svshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64))) +svuint32_t svshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16))) +svuint8_t svshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32))) +svint16_t svshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64))) +svint32_t svshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16))) +svint8_t svshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8))) +svuint8_t svsli_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32))) +svuint32_t svsli_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64))) +svuint64_t svsli_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16))) +svuint16_t svsli_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8))) +svint8_t svsli_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32))) +svint32_t svsli_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64))) +svint64_t svsli_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16))) +svint16_t svsli_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m))) +svuint8_t svsqadd_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m))) +svuint32_t svsqadd_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m))) +svuint64_t svsqadd_n_u64_m(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m))) +svuint16_t svsqadd_n_u16_m(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x))) +svuint8_t svsqadd_n_u8_x(svbool_t, 
svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x))) +svuint32_t svsqadd_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x))) +svuint64_t svsqadd_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x))) +svuint16_t svsqadd_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z))) +svuint8_t svsqadd_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z))) +svuint32_t svsqadd_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z))) +svuint64_t svsqadd_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z))) +svuint16_t svsqadd_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m))) +svuint8_t svsqadd_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m))) +svuint32_t svsqadd_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m))) +svuint64_t svsqadd_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m))) +svuint16_t svsqadd_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x))) +svuint8_t svsqadd_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x))) +svuint32_t svsqadd_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x))) +svuint64_t svsqadd_u64_x(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x))) +svuint16_t svsqadd_u16_x(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z))) +svuint8_t svsqadd_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z))) +svuint32_t svsqadd_u32_z(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z))) +svuint64_t svsqadd_u64_z(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z))) +svuint16_t svsqadd_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8))) +svint8_t svsra_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32))) +svint32_t svsra_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64))) +svint64_t svsra_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16))) +svint16_t svsra_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8))) +svuint8_t svsra_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32))) +svuint32_t svsra_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64))) +svuint64_t svsra_n_u64(svuint64_t, svuint64_t, 
uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16))) +svuint16_t svsra_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8))) +svuint8_t svsri_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32))) +svuint32_t svsri_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64))) +svuint64_t svsri_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16))) +svuint16_t svsri_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8))) +svint8_t svsri_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32))) +svint32_t svsri_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64))) +svint64_t svsri_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16))) +svint16_t svsri_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32))) +svuint16_t svsubhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64))) +svuint32_t svsubhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16))) +svuint8_t svsubhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32))) +svint16_t svsubhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64))) +svint32_t svsubhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16))) +svint8_t svsubhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32))) +svuint16_t svsubhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64))) +svuint32_t svsubhnb_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16))) +svuint8_t svsubhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32))) +svint16_t svsubhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64))) +svint32_t svsubhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16))) +svint8_t svsubhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32))) +svuint16_t svsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64))) +svuint32_t svsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16))) +svuint8_t svsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32))) +svint16_t svsubhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64))) +svint32_t svsubhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16))) +svint8_t svsubhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32))) +svuint16_t svsubhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64))) +svuint32_t svsubhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16))) +svuint8_t svsubhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32))) +svint16_t svsubhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64))) +svint32_t svsubhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16))) +svint8_t svsubhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32))) +svint32_t svsublb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64))) +svint64_t svsublb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16))) +svint16_t svsublb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32))) +svuint32_t svsublb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64))) +svuint64_t svsublb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16))) +svuint16_t svsublb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32))) +svint32_t svsublb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64))) +svint64_t svsublb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16))) +svint16_t svsublb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32))) +svuint32_t svsublb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64))) +svuint64_t svsublb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16))) +svuint16_t svsublb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32))) +svint32_t svsublbt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64))) +svint64_t svsublbt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16))) +svint16_t svsublbt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32))) +svint32_t svsublbt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64))) +svint64_t svsublbt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16))) +svint16_t svsublbt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32))) +svint32_t svsublt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64))) +svint64_t svsublt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16))) +svint16_t svsublt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32))) +svuint32_t svsublt_n_u32(svuint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64))) +svuint64_t svsublt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16))) +svuint16_t svsublt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32))) +svint32_t svsublt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64))) +svint64_t svsublt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16))) +svint16_t svsublt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32))) +svuint32_t svsublt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64))) +svuint64_t svsublt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16))) +svuint16_t svsublt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32))) +svint32_t svsubltb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64))) +svint64_t svsubltb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16))) +svint16_t svsubltb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32))) +svint32_t svsubltb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64))) +svint64_t svsubltb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16))) +svint16_t svsubltb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32))) +svint32_t svsubwb_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64))) +svint64_t svsubwb_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16))) +svint16_t svsubwb_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32))) +svuint32_t svsubwb_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64))) +svuint64_t svsubwb_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16))) +svuint16_t svsubwb_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32))) +svint32_t svsubwb_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64))) +svint64_t svsubwb_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16))) +svint16_t svsubwb_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32))) +svuint32_t svsubwb_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64))) +svuint64_t svsubwb_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16))) +svuint16_t svsubwb_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32))) +svint32_t svsubwt_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64))) +svint64_t svsubwt_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16))) 
+svint16_t svsubwt_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32))) +svuint32_t svsubwt_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64))) +svuint64_t svsubwt_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16))) +svuint16_t svsubwt_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32))) +svint32_t svsubwt_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64))) +svint64_t svsubwt_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16))) +svint16_t svsubwt_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32))) +svuint32_t svsubwt_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64))) +svuint64_t svsubwt_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16))) +svuint16_t svsubwt_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8))) +svuint8_t svtbl2_u8(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32))) +svuint32_t svtbl2_u32(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64))) +svuint64_t svtbl2_u64(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16))) +svuint16_t svtbl2_u16(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8))) +svint8_t svtbl2_s8(svint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64))) +svfloat64_t svtbl2_f64(svfloat64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32))) +svfloat32_t svtbl2_f32(svfloat32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16))) +svfloat16_t svtbl2_f16(svfloat16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32))) +svint32_t svtbl2_s32(svint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64))) +svint64_t svtbl2_s64(svint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16))) +svint16_t svtbl2_s16(svint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8))) +svuint8_t svtbx_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32))) +svuint32_t svtbx_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64))) +svuint64_t svtbx_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16))) +svuint16_t svtbx_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8))) +svint8_t svtbx_s8(svint8_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64))) +svfloat64_t svtbx_f64(svfloat64_t, svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32))) +svfloat32_t svtbx_f32(svfloat32_t, svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16))) +svfloat16_t 
svtbx_f16(svfloat16_t, svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32))) +svint32_t svtbx_s32(svint32_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64))) +svint64_t svtbx_s64(svint64_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16))) +svint16_t svtbx_s16(svint16_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m))) +svint8_t svuqadd_n_s8_m(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m))) +svint32_t svuqadd_n_s32_m(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m))) +svint64_t svuqadd_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m))) +svint16_t svuqadd_n_s16_m(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x))) +svint8_t svuqadd_n_s8_x(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x))) +svint32_t svuqadd_n_s32_x(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x))) +svint64_t svuqadd_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x))) +svint16_t svuqadd_n_s16_x(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z))) +svint8_t svuqadd_n_s8_z(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z))) +svint32_t svuqadd_n_s32_z(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z))) +svint64_t svuqadd_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z))) +svint16_t svuqadd_n_s16_z(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m))) +svint8_t svuqadd_s8_m(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m))) +svint32_t svuqadd_s32_m(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m))) +svint64_t svuqadd_s64_m(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m))) +svint16_t svuqadd_s16_m(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x))) +svint8_t svuqadd_s8_x(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x))) +svint32_t svuqadd_s32_x(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x))) +svint64_t svuqadd_s64_x(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x))) +svint16_t svuqadd_s16_x(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z))) +svint8_t svuqadd_s8_z(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z))) +svint32_t svuqadd_s32_z(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z))) +svint64_t 
svuqadd_s64_z(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z))) +svint16_t svuqadd_s16_z(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32))) +svbool_t svwhilege_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32))) +svbool_t svwhilege_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32))) +svbool_t svwhilege_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32))) +svbool_t svwhilege_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64))) +svbool_t svwhilege_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64))) +svbool_t svwhilege_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64))) +svbool_t svwhilege_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64))) +svbool_t svwhilege_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32))) +svbool_t svwhilege_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32))) +svbool_t svwhilege_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32))) +svbool_t svwhilege_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32))) +svbool_t svwhilege_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64))) +svbool_t svwhilege_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64))) +svbool_t svwhilege_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64))) +svbool_t svwhilege_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64))) +svbool_t svwhilege_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32))) +svbool_t svwhilegt_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32))) +svbool_t svwhilegt_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32))) +svbool_t svwhilegt_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32))) +svbool_t svwhilegt_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64))) +svbool_t svwhilegt_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64))) +svbool_t svwhilegt_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64))) +svbool_t svwhilegt_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64))) +svbool_t svwhilegt_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32))) +svbool_t svwhilegt_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32))) +svbool_t svwhilegt_b32_u32(uint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32))) +svbool_t svwhilegt_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32))) +svbool_t svwhilegt_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64))) +svbool_t svwhilegt_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64))) +svbool_t svwhilegt_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64))) +svbool_t svwhilegt_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64))) +svbool_t svwhilegt_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8))) +svbool_t svwhilerw_u8(uint8_t const *, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8))) +svbool_t svwhilerw_s8(int8_t const *, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64))) +svbool_t svwhilerw_u64(uint64_t const *, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64))) +svbool_t svwhilerw_f64(float64_t const *, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64))) +svbool_t svwhilerw_s64(int64_t const *, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16))) +svbool_t svwhilerw_u16(uint16_t const *, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16))) +svbool_t svwhilerw_f16(float16_t const *, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16))) +svbool_t svwhilerw_s16(int16_t const *, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32))) +svbool_t svwhilerw_u32(uint32_t const *, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32))) +svbool_t svwhilerw_f32(float32_t const *, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32))) +svbool_t svwhilerw_s32(int32_t const *, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8))) +svbool_t svwhilewr_u8(uint8_t const *, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8))) +svbool_t svwhilewr_s8(int8_t const *, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64))) +svbool_t svwhilewr_u64(uint64_t const *, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64))) +svbool_t svwhilewr_f64(float64_t const *, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64))) +svbool_t svwhilewr_s64(int64_t const *, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16))) +svbool_t svwhilewr_u16(uint16_t const *, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16))) +svbool_t svwhilewr_f16(float16_t const *, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16))) +svbool_t svwhilewr_s16(int16_t const *, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32))) +svbool_t svwhilewr_u32(uint32_t const *, uint32_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32))) +svbool_t svwhilewr_f32(float32_t const *, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32))) +svbool_t svwhilewr_s32(int32_t const *, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8))) +svuint8_t svxar_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32))) +svuint32_t svxar_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64))) +svuint64_t svxar_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16))) +svuint16_t svxar_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8))) +svint8_t svxar_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32))) +svint32_t svxar_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64))) +svint64_t svxar_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16))) +svint16_t svxar_n_s16(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) +svint8_t svaba(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) +svint32_t svaba(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64))) +svint64_t svaba(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16))) +svint16_t svaba(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8))) +svuint8_t svaba(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32))) +svuint32_t svaba(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64))) +svuint64_t svaba(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16))) +svuint16_t svaba(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8))) +svint8_t svaba(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32))) +svint32_t svaba(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64))) +svint64_t svaba(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16))) +svint16_t svaba(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8))) +svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32))) +svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64))) +svuint64_t svaba(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16))) +svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32))) +svint32_t svabalb(svint32_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64))) +svint64_t svabalb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16))) +svint16_t svabalb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32))) +svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64))) +svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16))) +svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32))) +svint32_t svabalb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64))) +svint64_t svabalb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16))) +svint16_t svabalb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32))) +svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64))) +svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16))) +svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32))) +svint32_t svabalt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64))) +svint64_t svabalt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16))) +svint16_t svabalt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32))) +svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64))) +svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16))) +svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32))) +svint32_t svabalt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64))) +svint64_t svabalt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16))) +svint16_t svabalt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32))) +svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64))) +svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16))) +svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32))) +svint32_t svabdlb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64))) +svint64_t svabdlb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16))) +svint16_t svabdlb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32))) +svuint32_t svabdlb(svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64))) +svuint64_t svabdlb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16))) +svuint16_t svabdlb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32))) +svint32_t svabdlb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64))) +svint64_t svabdlb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16))) +svint16_t svabdlb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32))) +svuint32_t svabdlb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64))) +svuint64_t svabdlb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16))) +svuint16_t svabdlb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32))) +svint32_t svabdlt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64))) +svint64_t svabdlt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16))) +svint16_t svabdlt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32))) +svuint32_t svabdlt(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64))) +svuint64_t svabdlt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16))) +svuint16_t svabdlt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32))) +svint32_t svabdlt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64))) +svint64_t svabdlt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16))) +svint16_t svabdlt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32))) +svuint32_t svabdlt(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64))) +svuint64_t svabdlt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16))) +svuint16_t svabdlt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m))) +svint32_t svadalp_m(svbool_t, svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m))) +svint64_t svadalp_m(svbool_t, svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m))) +svint16_t svadalp_m(svbool_t, svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x))) +svint32_t svadalp_x(svbool_t, svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x))) +svint64_t svadalp_x(svbool_t, svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x))) +svint16_t svadalp_x(svbool_t, svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z))) +svint32_t svadalp_z(svbool_t, svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z))) +svint64_t svadalp_z(svbool_t, svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z))) +svint16_t 
svadalp_z(svbool_t, svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m))) +svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m))) +svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m))) +svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x))) +svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x))) +svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x))) +svuint16_t svadalp_x(svbool_t, svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z))) +svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) +svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) +svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) +svuint32_t svadclb(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) +svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) +svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) +svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) +svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) +svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) +svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) +svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) +svuint16_t svaddhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) +svuint32_t svaddhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) +svuint8_t svaddhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) +svint16_t svaddhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) +svint32_t svaddhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) +svint8_t svaddhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) +svuint16_t svaddhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) +svuint32_t svaddhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) +svuint8_t svaddhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) +svint16_t svaddhnb(svint32_t, 
svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) +svint32_t svaddhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) +svint8_t svaddhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) +svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) +svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) +svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) +svint16_t svaddhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) +svint32_t svaddhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) +svint8_t svaddhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) +svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) +svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) +svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) +svint16_t svaddhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) +svint32_t svaddhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) +svint8_t svaddhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) +svint32_t svaddlb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) +svint64_t svaddlb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) +svint16_t svaddlb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) +svuint32_t svaddlb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) +svuint64_t svaddlb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) +svuint16_t svaddlb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) +svint32_t svaddlb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) +svint64_t svaddlb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) +svint16_t svaddlb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) +svuint32_t svaddlb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) +svuint64_t svaddlb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) +svuint16_t svaddlb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) +svint32_t svaddlbt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) +svint64_t svaddlbt(svint32_t, int32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) +svint16_t svaddlbt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) +svint32_t svaddlbt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) +svint64_t svaddlbt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) +svint16_t svaddlbt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) +svint32_t svaddlt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) +svint64_t svaddlt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) +svint16_t svaddlt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) +svuint32_t svaddlt(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) +svuint64_t svaddlt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) +svuint16_t svaddlt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) +svint32_t svaddlt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) +svint64_t svaddlt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) +svint16_t svaddlt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) +svuint32_t svaddlt(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) +svuint64_t svaddlt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) +svuint16_t svaddlt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) +svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) +svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) +svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) +svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) +svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) +svfloat16_t svaddp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) +svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) +svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) +svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) +svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) +svint8_t svaddp_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) +svint32_t svaddp_m(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) +svint64_t svaddp_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) +svint16_t svaddp_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) +svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) +svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) +svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) +svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) +svint8_t svaddp_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) +svint32_t svaddp_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) +svint64_t svaddp_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) +svint16_t svaddp_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) +svint32_t svaddwb(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) +svint64_t svaddwb(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) +svint16_t svaddwb(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) +svuint32_t svaddwb(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) +svuint64_t svaddwb(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) +svuint16_t svaddwb(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) +svint32_t svaddwb(svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) +svint64_t svaddwb(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) +svint16_t svaddwb(svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) +svuint32_t svaddwb(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) +svuint64_t svaddwb(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) +svuint16_t svaddwb(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) +svint32_t svaddwt(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) +svint64_t svaddwt(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) +svint16_t svaddwt(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) +svuint32_t svaddwt(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) +svuint64_t svaddwt(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) +svuint16_t svaddwt(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) 
+svint32_t svaddwt(svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) +svint64_t svaddwt(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) +svint16_t svaddwt(svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) +svuint32_t svaddwt(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) +svuint64_t svaddwt(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) +svuint16_t svaddwt(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) +svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) +svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) +svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) +svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) +svint8_t svbcax(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) +svint32_t svbcax(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) +svint64_t svbcax(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) +svint16_t svbcax(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) +svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) +svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) +svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) +svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) +svint8_t svbcax(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) +svint32_t svbcax(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) +svint64_t svbcax(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) +svint16_t svbcax(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) +svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) +svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) +svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) +svuint16_t svbsl1n(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) +svint8_t svbsl1n(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) +svint32_t svbsl1n(svint32_t, svint32_t, int32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) +svint64_t svbsl1n(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) +svint16_t svbsl1n(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) +svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) +svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) +svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) +svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) +svint8_t svbsl1n(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) +svint32_t svbsl1n(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) +svint64_t svbsl1n(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) +svint16_t svbsl1n(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) +svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) +svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) +svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) +svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) +svint8_t svbsl2n(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) +svint32_t svbsl2n(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) +svint64_t svbsl2n(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) +svint16_t svbsl2n(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8))) +svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32))) +svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64))) +svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16))) +svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8))) +svint8_t svbsl2n(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32))) +svint32_t svbsl2n(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64))) +svint64_t svbsl2n(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16))) +svint16_t svbsl2n(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8))) +svuint8_t svbsl(svuint8_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32))) +svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64))) +svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16))) +svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8))) +svint8_t svbsl(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32))) +svint32_t svbsl(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64))) +svint64_t svbsl(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16))) +svint16_t svbsl(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8))) +svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32))) +svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64))) +svuint64_t svbsl(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16))) +svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8))) +svint8_t svbsl(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32))) +svint32_t svbsl(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64))) +svint64_t svbsl(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16))) +svint16_t svbsl(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8))) +svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32))) +svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64))) +svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16))) +svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8))) +svint8_t svcadd(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32))) +svint32_t svcadd(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64))) +svint64_t svcadd(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16))) +svint16_t svcadd(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32))) +svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64))) +svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32))) +svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64))) +svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8))) +svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32))) +svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64))) +svuint64_t svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16))) +svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8))) +svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32))) +svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64))) +svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16))) +svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32))) +svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16))) +svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32))) +svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16))) +svint16_t svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m))) +svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x))) +svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m))) +svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x))) +svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m))) +svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m))) +svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m))) +svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x))) +svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z))) +svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m))) +svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8))) +svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32))) +svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64))) +svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16))) +svuint16_t sveor3(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8))) +svint8_t sveor3(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32))) +svint32_t sveor3(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64))) +svint64_t sveor3(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16))) +svint16_t sveor3(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8))) +svuint8_t sveor3(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32))) +svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64))) +svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16))) +svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8))) +svint8_t sveor3(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32))) +svint32_t sveor3(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64))) +svint64_t sveor3(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16))) +svint16_t sveor3(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8))) +svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32))) +svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64))) +svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16))) +svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8))) +svint8_t sveorbt(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32))) +svint32_t sveorbt(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64))) +svint64_t sveorbt(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16))) +svint16_t sveorbt(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8))) +svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32))) +svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64))) +svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16))) +svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8))) +svint8_t sveorbt(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32))) +svint32_t sveorbt(svint32_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64))) +svint64_t sveorbt(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16))) +svint16_t sveorbt(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8))) +svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32))) +svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64))) +svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16))) +svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8))) +svint8_t sveortb(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32))) +svint32_t sveortb(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64))) +svint64_t sveortb(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16))) +svint16_t sveortb(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8))) +svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32))) +svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64))) +svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16))) +svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8))) +svint8_t sveortb(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32))) +svint32_t sveortb(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64))) +svint64_t sveortb(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16))) +svint16_t sveortb(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m))) +svint8_t svhadd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m))) +svint32_t svhadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m))) +svint64_t svhadd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m))) +svint16_t svhadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x))) +svint8_t svhadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x))) +svint32_t svhadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x))) +svint64_t svhadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x))) +svint16_t svhadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z))) +svint8_t svhadd_z(svbool_t, svint8_t, int8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z))) +svint32_t svhadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z))) +svint64_t svhadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z))) +svint16_t svhadd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m))) +svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m))) +svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m))) +svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m))) +svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x))) +svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x))) +svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x))) +svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x))) +svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z))) +svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z))) +svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z))) +svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z))) +svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m))) +svint8_t svhadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m))) +svint32_t svhadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m))) +svint64_t svhadd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m))) +svint16_t svhadd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x))) +svint8_t svhadd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x))) +svint32_t svhadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x))) +svint64_t svhadd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x))) +svint16_t svhadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z))) +svint8_t svhadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z))) +svint32_t svhadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z))) +svint64_t svhadd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z))) +svint16_t svhadd_z(svbool_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m))) +svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m))) +svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m))) +svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m))) +svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x))) +svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x))) +svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x))) +svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x))) +svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z))) +svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z))) +svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z))) +svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z))) +svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m))) +svint8_t svhsub_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m))) +svint32_t svhsub_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m))) +svint64_t svhsub_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m))) +svint16_t svhsub_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x))) +svint8_t svhsub_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x))) +svint32_t svhsub_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x))) +svint64_t svhsub_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x))) +svint16_t svhsub_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z))) +svint8_t svhsub_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z))) +svint32_t svhsub_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z))) +svint64_t svhsub_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z))) +svint16_t svhsub_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m))) +svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m))) +svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m))) +svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m))) +svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x))) +svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x))) +svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x))) +svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x))) +svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z))) +svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z))) +svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z))) +svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z))) +svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m))) +svint8_t svhsub_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m))) +svint32_t svhsub_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m))) +svint64_t svhsub_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m))) +svint16_t svhsub_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x))) +svint8_t svhsub_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x))) +svint32_t svhsub_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x))) +svint64_t svhsub_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x))) +svint16_t svhsub_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z))) +svint8_t svhsub_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z))) +svint32_t svhsub_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z))) +svint64_t svhsub_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z))) +svint16_t svhsub_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m))) +svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m))) +svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m))) +svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m))) +svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x))) +svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x))) +svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x))) +svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x))) +svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z))) +svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z))) +svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z))) +svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z))) +svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m))) +svint8_t svhsubr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m))) +svint32_t svhsubr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m))) +svint64_t svhsubr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m))) +svint16_t svhsubr_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x))) +svint8_t svhsubr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x))) +svint32_t svhsubr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x))) +svint64_t svhsubr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x))) +svint16_t svhsubr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z))) +svint8_t svhsubr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z))) +svint32_t svhsubr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z))) +svint64_t svhsubr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z))) +svint16_t svhsubr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m))) +svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m))) +svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m))) +svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m))) +svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x))) +svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x))) +svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x))) +svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x))) +svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z))) +svuint8_t 
svhsubr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z))) +svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z))) +svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z))) +svuint16_t svhsubr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m))) +svint8_t svhsubr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m))) +svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m))) +svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m))) +svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x))) +svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x))) +svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x))) +svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x))) +svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z))) +svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z))) +svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z))) +svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z))) +svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m))) +svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m))) +svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m))) +svuint64_t svhsubr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m))) +svuint16_t svhsubr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x))) +svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x))) +svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x))) +svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x))) +svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z))) +svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z))) +svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z))) +svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z))) +svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m))) +svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m))) +svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m))) +svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x))) +svint64_t svlogb_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x))) +svint32_t svlogb_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x))) +svint16_t svlogb_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z))) +svint64_t svlogb_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z))) +svint32_t svlogb_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z))) +svint16_t svlogb_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m))) +svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m))) +svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m))) +svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x))) +svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x))) +svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x))) +svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m))) +svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m))) +svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m))) +svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x))) +svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x))) +svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x))) +svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m))) +svint8_t svmaxp_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m))) +svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m))) +svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m))) +svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x))) +svint8_t svmaxp_x(svbool_t, 
svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x))) +svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x))) +svint64_t svmaxp_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x))) +svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m))) +svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m))) +svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m))) +svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m))) +svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x))) +svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x))) +svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x))) +svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x))) +svuint16_t svmaxp_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m))) +svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m))) +svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m))) +svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x))) +svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x))) +svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x))) +svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m))) +svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m))) +svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m))) +svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x))) +svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x))) +svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x))) +svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m))) +svint8_t svminp_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m))) +svint32_t svminp_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m))) +svint64_t svminp_m(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m))) +svint16_t svminp_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x))) +svint8_t svminp_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x))) +svint32_t svminp_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x))) +svint64_t svminp_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x))) +svint16_t svminp_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m))) +svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m))) +svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m))) +svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m))) +svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x))) +svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x))) +svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x))) +svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x))) +svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32))) +svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64))) +svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16))) +svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32))) +svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64))) +svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16))) +svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32))) +svfloat32_t svmlalb(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32))) +svint32_t svmlalb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64))) +svint64_t svmlalb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16))) +svint16_t svmlalb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32))) +svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64))) +svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16))) +svuint16_t svmlalb(svuint16_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32))) +svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32))) +svint32_t svmlalb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64))) +svint64_t svmlalb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16))) +svint16_t svmlalb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32))) +svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64))) +svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16))) +svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32))) +svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32))) +svint32_t svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64))) +svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32))) +svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64))) +svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32))) +svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32))) +svint32_t svmlalt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64))) +svint64_t svmlalt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16))) +svint16_t svmlalt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32))) +svuint32_t svmlalt(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64))) +svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16))) +svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32))) +svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32))) +svint32_t svmlalt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64))) +svint64_t svmlalt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16))) +svint16_t svmlalt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32))) +svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64))) +svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16))) +svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32))) +svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32))) +svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64))) +svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32))) +svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64))) +svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32))) +svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64))) +svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16))) +svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32))) +svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64))) +svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16))) +svint16_t svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32))) +svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32))) +svint32_t svmlslb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64))) +svint64_t svmlslb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16))) +svint16_t svmlslb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32))) +svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64))) +svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16))) +svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32))) +svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32))) +svint32_t svmlslb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64))) +svint64_t svmlslb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16))) +svint16_t svmlslb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32))) +svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64))) +svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16))) +svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32))) +svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32))) +svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64))) +svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32))) +svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64))) +svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32))) +svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32))) +svint32_t svmlslt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64))) +svint64_t svmlslt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16))) +svint16_t svmlslt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32))) +svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64))) +svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16))) +svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32))) +svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32))) +svint32_t svmlslt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64))) +svint64_t svmlslt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16))) +svint16_t svmlslt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32))) +svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64))) +svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16))) +svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32))) +svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32))) +svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64))) +svint64_t svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32))) +svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64))) +svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32))) +svint32_t svmovlb(svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64))) +svint64_t svmovlb(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16))) +svint16_t svmovlb(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32))) +svuint32_t svmovlb(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64))) +svuint64_t svmovlb(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16))) +svuint16_t svmovlb(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32))) +svint32_t svmovlt(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64))) +svint64_t svmovlt(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16))) +svint16_t svmovlt(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32))) +svuint32_t svmovlt(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64))) +svuint64_t svmovlt(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16))) +svuint16_t svmovlt(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32))) +svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64))) +svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16))) +svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32))) +svint32_t svmul_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64))) +svint64_t svmul_lane(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16))) +svint16_t svmul_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32))) +svint32_t svmullb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64))) +svint64_t svmullb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16))) +svint16_t svmullb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32))) +svuint32_t svmullb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64))) +svuint64_t svmullb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16))) +svuint16_t svmullb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32))) +svint32_t svmullb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64))) +svint64_t svmullb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16))) +svint16_t svmullb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32))) +svuint32_t svmullb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64))) +svuint64_t svmullb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16))) +svuint16_t svmullb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32))) +svint32_t 
svmullb_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64))) +svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32))) +svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64))) +svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32))) +svint32_t svmullt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64))) +svint64_t svmullt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16))) +svint16_t svmullt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32))) +svuint32_t svmullt(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64))) +svuint64_t svmullt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16))) +svuint16_t svmullt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32))) +svint32_t svmullt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64))) +svint64_t svmullt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16))) +svint16_t svmullt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32))) +svuint32_t svmullt(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64))) +svuint64_t svmullt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16))) +svuint16_t svmullt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32))) +svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64))) +svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32))) +svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64))) +svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8))) +svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32))) +svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64))) +svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16))) +svuint16_t svnbsl(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8))) +svint8_t svnbsl(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32))) +svint32_t svnbsl(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64))) +svint64_t svnbsl(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16))) +svint16_t svnbsl(svint16_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8))) +svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32))) +svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64))) +svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16))) +svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8))) +svint8_t svnbsl(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32))) +svint32_t svnbsl(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64))) +svint64_t svnbsl(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16))) +svint16_t svnbsl(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8))) +svuint8_t svpmul(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8))) +svuint8_t svpmul(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64))) +svuint64_t svpmullb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16))) +svuint16_t svpmullb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64))) +svuint64_t svpmullb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16))) +svuint16_t svpmullb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8))) +svuint8_t svpmullb_pair(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32))) +svuint32_t svpmullb_pair(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8))) +svuint8_t svpmullb_pair(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32))) +svuint32_t svpmullb_pair(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64))) +svuint64_t svpmullt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16))) +svuint16_t svpmullt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64))) +svuint64_t svpmullt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16))) +svuint16_t svpmullt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8))) +svuint8_t svpmullt_pair(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32))) +svuint32_t svpmullt_pair(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8))) +svuint8_t svpmullt_pair(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32))) +svuint32_t svpmullt_pair(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m))) +svint8_t svqabs_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m))) +svint32_t svqabs_m(svint32_t, svbool_t, svint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m))) +svint64_t svqabs_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m))) +svint16_t svqabs_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x))) +svint8_t svqabs_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x))) +svint32_t svqabs_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x))) +svint64_t svqabs_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x))) +svint16_t svqabs_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z))) +svint8_t svqabs_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z))) +svint32_t svqabs_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z))) +svint64_t svqabs_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z))) +svint16_t svqabs_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m))) +svint8_t svqadd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m))) +svint32_t svqadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m))) +svint64_t svqadd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m))) +svint16_t svqadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x))) +svint8_t svqadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x))) +svint32_t svqadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x))) +svint64_t svqadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x))) +svint16_t svqadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z))) +svint8_t svqadd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z))) +svint32_t svqadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z))) +svint64_t svqadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z))) +svint16_t svqadd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m))) +svuint8_t svqadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m))) +svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m))) +svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m))) +svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x))) +svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x))) +svuint32_t svqadd_x(svbool_t, 
svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x))) +svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x))) +svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z))) +svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z))) +svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z))) +svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z))) +svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m))) +svint8_t svqadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m))) +svint32_t svqadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m))) +svint64_t svqadd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m))) +svint16_t svqadd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x))) +svint8_t svqadd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x))) +svint32_t svqadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x))) +svint64_t svqadd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x))) +svint16_t svqadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z))) +svint8_t svqadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z))) +svint32_t svqadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z))) +svint64_t svqadd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z))) +svint16_t svqadd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m))) +svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m))) +svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m))) +svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m))) +svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x))) +svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x))) +svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x))) +svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x))) +svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z))) +svuint8_t svqadd_z(svbool_t, 
svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z))) +svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z))) +svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z))) +svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8))) +svint8_t svqcadd(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32))) +svint32_t svqcadd(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64))) +svint64_t svqcadd(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16))) +svint16_t svqcadd(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32))) +svint32_t svqdmlalb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64))) +svint64_t svqdmlalb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16))) +svint16_t svqdmlalb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32))) +svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64))) +svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16))) +svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32))) +svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64))) +svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32))) +svint32_t svqdmlalbt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64))) +svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) +svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) +svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) +svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) +svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) +svint32_t svqdmlalt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) +svint64_t svqdmlalt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) +svint16_t svqdmlalt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) +svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) +svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) +svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) +svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) +svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) +svint32_t svqdmlslb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) +svint64_t svqdmlslb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) +svint16_t svqdmlslb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) +svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) +svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) +svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) +svint32_t svqdmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) +svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) +svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) +svint64_t svqdmlslbt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) +svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) +svint32_t svqdmlslbt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) +svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) +svint16_t svqdmlslbt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) +svint32_t svqdmlslt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) +svint64_t svqdmlslt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) +svint16_t svqdmlslt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) +svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) +svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) +svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) +svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) +svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) +svint8_t 
svqdmulh(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) +svint32_t svqdmulh(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) +svint64_t svqdmulh(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) +svint16_t svqdmulh(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) +svint8_t svqdmulh(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) +svint32_t svqdmulh(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) +svint64_t svqdmulh(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) +svint16_t svqdmulh(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) +svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) +svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) +svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) +svint32_t svqdmullb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) +svint64_t svqdmullb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) +svint16_t svqdmullb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) +svint32_t svqdmullb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) +svint64_t svqdmullb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) +svint16_t svqdmullb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) +svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) +svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) +svint32_t svqdmullt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) +svint64_t svqdmullt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) +svint16_t svqdmullt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) +svint32_t svqdmullt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) +svint64_t svqdmullt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) +svint16_t svqdmullt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) +svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) +svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) +svint8_t svqneg_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) +svint32_t 
svqneg_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) +svint64_t svqneg_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) +svint16_t svqneg_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) +svint8_t svqneg_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) +svint32_t svqneg_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) +svint64_t svqneg_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) +svint16_t svqneg_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) +svint8_t svqneg_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) +svint32_t svqneg_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) +svint64_t svqneg_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) +svint16_t svqneg_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) +svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) +svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) +svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) +svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) +svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) +svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) +svint8_t svqrdmlah(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) +svint32_t svqrdmlah(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) +svint64_t svqrdmlah(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) +svint16_t svqrdmlah(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) +svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) +svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) +svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) +svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) +svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) +svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) +svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) +svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) +svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) +svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) +svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) +svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) +svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) +svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) +svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) +svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) +svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) +svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) +svint8_t svqrdmulh(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) +svint32_t svqrdmulh(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) +svint64_t svqrdmulh(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) +svint16_t svqrdmulh(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) +svint8_t svqrdmulh(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) +svint32_t svqrdmulh(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) +svint64_t svqrdmulh(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) +svint16_t svqrdmulh(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) +svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) +svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) +svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) +svint8_t svqrshl_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) +svint32_t svqrshl_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) +svint64_t svqrshl_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) 
+svint16_t svqrshl_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) +svint8_t svqrshl_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) +svint32_t svqrshl_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) +svint64_t svqrshl_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) +svint16_t svqrshl_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) +svint8_t svqrshl_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) +svint32_t svqrshl_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) +svint64_t svqrshl_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) +svint16_t svqrshl_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) +svuint8_t svqrshl_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) +svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) +svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) +svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) +svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) +svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) +svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) +svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) +svuint8_t svqrshl_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) +svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) +svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) +svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) +svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) +svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) +svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) +svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) +svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) +svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) +svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) +svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) +svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) +svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) +svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) +svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) +svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) +svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) +svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) +svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) +svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) +svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) +svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x))) +svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z))) +svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z))) +svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z))) +svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z))) +svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32))) +svint16_t svqrshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64))) +svint32_t svqrshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16))) +svint8_t svqrshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32))) +svuint16_t svqrshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64))) +svuint32_t svqrshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16))) +svuint8_t svqrshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32))) +svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64))) +svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16))) +svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32))) +svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64))) +svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16))) +svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32))) +svuint16_t svqrshrunb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64))) +svuint32_t svqrshrunb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16))) +svuint8_t svqrshrunb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32))) +svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64))) +svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16))) +svuint8_t svqrshrunt(svuint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m))) +svint8_t svqshl_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m))) +svint32_t svqshl_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m))) +svint64_t svqshl_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m))) +svint16_t svqshl_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x))) +svint8_t svqshl_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x))) +svint32_t svqshl_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x))) +svint64_t svqshl_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x))) +svint16_t svqshl_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z))) +svint8_t svqshl_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z))) +svint32_t svqshl_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z))) +svint64_t svqshl_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z))) +svint16_t svqshl_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m))) +svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m))) +svuint32_t svqshl_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m))) +svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m))) +svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x))) +svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x))) +svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x))) +svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x))) +svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z))) +svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z))) +svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z))) +svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z))) +svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m))) +svint8_t svqshl_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m))) +svint32_t svqshl_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m))) +svint64_t svqshl_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m))) +svint16_t svqshl_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x))) +svint8_t svqshl_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x))) +svint32_t svqshl_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x))) +svint64_t svqshl_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x))) +svint16_t svqshl_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z))) +svint8_t svqshl_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z))) +svint32_t svqshl_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z))) +svint64_t svqshl_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z))) +svint16_t svqshl_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m))) +svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m))) +svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m))) +svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m))) +svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x))) +svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x))) +svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x))) +svuint64_t svqshl_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x))) +svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z))) +svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z))) +svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z))) +svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z))) +svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m))) +svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m))) +svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m))) +svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m))) +svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x))) +svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x))) +svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x))) +svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x))) +svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z))) +svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z))) +svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z))) +svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z))) +svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32))) +svint16_t svqshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64))) +svint32_t svqshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16))) +svint8_t svqshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32))) +svuint16_t svqshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64))) +svuint32_t svqshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16))) +svuint8_t svqshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32))) +svint16_t svqshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64))) +svint32_t svqshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16))) +svint8_t svqshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32))) +svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64))) +svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16))) +svuint8_t svqshrnt(svuint8_t, svuint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32))) +svuint16_t svqshrunb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64))) +svuint32_t svqshrunb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16))) +svuint8_t svqshrunb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32))) +svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64))) +svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16))) +svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m))) +svint8_t svqsub_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m))) +svint32_t svqsub_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m))) +svint64_t svqsub_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m))) +svint16_t svqsub_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x))) +svint8_t svqsub_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x))) +svint32_t svqsub_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x))) +svint64_t svqsub_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x))) +svint16_t svqsub_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z))) +svint8_t svqsub_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z))) +svint32_t svqsub_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z))) +svint64_t svqsub_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z))) +svint16_t svqsub_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m))) +svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m))) +svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m))) +svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m))) +svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x))) +svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x))) +svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x))) +svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x))) +svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z))) +svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z))) +svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z))) +svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z))) +svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m))) +svint8_t svqsub_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m))) +svint32_t svqsub_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m))) +svint64_t svqsub_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m))) +svint16_t svqsub_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x))) +svint8_t svqsub_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x))) +svint32_t svqsub_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x))) +svint64_t svqsub_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x))) +svint16_t svqsub_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z))) +svint8_t svqsub_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z))) +svint32_t svqsub_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z))) +svint64_t svqsub_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z))) +svint16_t svqsub_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m))) +svuint8_t svqsub_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m))) +svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m))) +svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m))) +svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x))) +svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x))) +svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x))) +svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x))) +svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z))) +svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z))) +svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z))) +svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z))) +svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m))) +svint8_t svqsubr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m))) +svint32_t svqsubr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m))) +svint64_t svqsubr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m))) +svint16_t svqsubr_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x))) +svint8_t svqsubr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x))) +svint32_t svqsubr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x))) +svint64_t svqsubr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x))) +svint16_t svqsubr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z))) +svint8_t svqsubr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z))) +svint32_t svqsubr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z))) +svint64_t svqsubr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z))) +svint16_t svqsubr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m))) +svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m))) +svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m))) +svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m))) +svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x))) +svuint8_t svqsubr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x))) +svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x))) +svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x))) +svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z))) +svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z))) +svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z))) +svuint64_t svqsubr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z))) +svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m))) +svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m))) +svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m))) 
+svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m))) +svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x))) +svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x))) +svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x))) +svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x))) +svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z))) +svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z))) +svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z))) +svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z))) +svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m))) +svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m))) +svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m))) +svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m))) +svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x))) +svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x))) +svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x))) +svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x))) +svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z))) +svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z))) +svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z))) +svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z))) +svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32))) +svint16_t svqxtnb(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64))) +svint32_t svqxtnb(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16))) +svint8_t svqxtnb(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32))) +svuint16_t svqxtnb(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64))) +svuint32_t svqxtnb(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16))) +svuint8_t svqxtnb(svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32))) +svint16_t svqxtnt(svint16_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64))) +svint32_t svqxtnt(svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16))) +svint8_t svqxtnt(svint8_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32))) +svuint16_t svqxtnt(svuint16_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64))) +svuint32_t svqxtnt(svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16))) +svuint8_t svqxtnt(svuint8_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32))) +svuint16_t svqxtunb(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64))) +svuint32_t svqxtunb(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16))) +svuint8_t svqxtunb(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32))) +svuint16_t svqxtunt(svuint16_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64))) +svuint32_t svqxtunt(svuint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16))) +svuint8_t svqxtunt(svuint8_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32))) +svuint16_t svraddhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64))) +svuint32_t svraddhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16))) +svuint8_t svraddhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32))) +svint16_t svraddhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64))) +svint32_t svraddhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16))) +svint8_t svraddhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32))) +svuint16_t svraddhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64))) +svuint32_t svraddhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16))) +svuint8_t svraddhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32))) +svint16_t svraddhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64))) +svint32_t svraddhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16))) +svint8_t svraddhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32))) +svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64))) +svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16))) +svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32))) +svint16_t svraddhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64))) +svint32_t 
svraddhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16))) +svint8_t svraddhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32))) +svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64))) +svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16))) +svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32))) +svint16_t svraddhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64))) +svint32_t svraddhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16))) +svint8_t svraddhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m))) +svuint32_t svrecpe_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x))) +svuint32_t svrecpe_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z))) +svuint32_t svrecpe_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m))) +svint8_t svrhadd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m))) +svint32_t svrhadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m))) +svint64_t svrhadd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m))) +svint16_t svrhadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x))) +svint8_t svrhadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x))) +svint32_t svrhadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x))) +svint64_t svrhadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x))) +svint16_t svrhadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z))) +svint8_t svrhadd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z))) +svint32_t svrhadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z))) +svint64_t svrhadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z))) +svint16_t svrhadd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m))) +svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m))) +svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m))) +svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m))) +svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x))) +svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x))) +svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x))) +svuint64_t svrhadd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x))) +svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z))) +svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z))) +svuint32_t svrhadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z))) +svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z))) +svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m))) +svint8_t svrhadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m))) +svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m))) +svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m))) +svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x))) +svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x))) +svint32_t svrhadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x))) +svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x))) +svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z))) +svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z))) +svint32_t svrhadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z))) +svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z))) +svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m))) +svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m))) +svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m))) +svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m))) +svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x))) +svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x))) +svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x))) 
+svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x))) +svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z))) +svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z))) +svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z))) +svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z))) +svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m))) +svint8_t svrshl_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m))) +svint32_t svrshl_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m))) +svint64_t svrshl_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m))) +svint16_t svrshl_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x))) +svint8_t svrshl_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x))) +svint32_t svrshl_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x))) +svint64_t svrshl_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x))) +svint16_t svrshl_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z))) +svint8_t svrshl_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z))) +svint32_t svrshl_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z))) +svint64_t svrshl_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z))) +svint16_t svrshl_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m))) +svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m))) +svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m))) +svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m))) +svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x))) +svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x))) +svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x))) +svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x))) +svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z))) +svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z))) +svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z))) +svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z))) +svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m))) +svint8_t svrshl_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m))) +svint32_t svrshl_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m))) +svint64_t svrshl_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m))) +svint16_t svrshl_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x))) +svint8_t svrshl_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x))) +svint32_t svrshl_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x))) +svint64_t svrshl_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x))) +svint16_t svrshl_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z))) +svint8_t svrshl_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z))) +svint32_t svrshl_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z))) +svint64_t svrshl_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z))) +svint16_t svrshl_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m))) +svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m))) +svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m))) +svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m))) +svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x))) +svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x))) +svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x))) +svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x))) +svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z))) +svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z))) +svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z))) +svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z))) +svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m))) +svint8_t svrshr_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m))) +svint32_t svrshr_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m))) +svint64_t svrshr_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m))) +svint16_t svrshr_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m))) +svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m))) +svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m))) +svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m))) +svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x))) +svint8_t svrshr_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x))) +svint32_t svrshr_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x))) +svint64_t svrshr_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x))) +svint16_t svrshr_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x))) +svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x))) +svuint32_t svrshr_x(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x))) +svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x))) +svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z))) +svint8_t svrshr_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z))) +svint32_t svrshr_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z))) +svint64_t svrshr_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z))) +svint16_t svrshr_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z))) +svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z))) +svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z))) +svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z))) +svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32))) +svuint16_t svrshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64))) +svuint32_t svrshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16))) +svuint8_t svrshrnb(svuint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32))) +svint16_t svrshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64))) +svint32_t svrshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16))) +svint8_t svrshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32))) +svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64))) +svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16))) +svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32))) +svint16_t svrshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64))) +svint32_t svrshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16))) +svint8_t svrshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m))) +svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x))) +svuint32_t svrsqrte_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z))) +svuint32_t svrsqrte_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8))) +svint8_t svrsra(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32))) +svint32_t svrsra(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64))) +svint64_t svrsra(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16))) +svint16_t svrsra(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8))) +svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32))) +svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64))) +svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16))) +svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32))) +svuint16_t svrsubhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64))) +svuint32_t svrsubhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16))) +svuint8_t svrsubhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32))) +svint16_t svrsubhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64))) +svint32_t svrsubhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16))) +svint8_t svrsubhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32))) +svuint16_t svrsubhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64))) +svuint32_t 
svrsubhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16))) +svuint8_t svrsubhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32))) +svint16_t svrsubhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64))) +svint32_t svrsubhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16))) +svint8_t svrsubhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32))) +svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64))) +svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16))) +svuint8_t svrsubhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32))) +svint16_t svrsubhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64))) +svint32_t svrsubhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16))) +svint8_t svrsubhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32))) +svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64))) +svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16))) +svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32))) +svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64))) +svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16))) +svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32))) +svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64))) +svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32))) +svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64))) +svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32))) +svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64))) +svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32))) +svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64))) +svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32))) +svint32_t svshllb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64))) +svint64_t svshllb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16))) +svint16_t 
svshllb(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32))) +svuint32_t svshllb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64))) +svuint64_t svshllb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16))) +svuint16_t svshllb(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32))) +svint32_t svshllt(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64))) +svint64_t svshllt(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16))) +svint16_t svshllt(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32))) +svuint32_t svshllt(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64))) +svuint64_t svshllt(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16))) +svuint16_t svshllt(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32))) +svuint16_t svshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64))) +svuint32_t svshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16))) +svuint8_t svshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32))) +svint16_t svshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64))) +svint32_t svshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16))) +svint8_t svshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32))) +svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64))) +svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16))) +svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32))) +svint16_t svshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64))) +svint32_t svshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16))) +svint8_t svshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8))) +svuint8_t svsli(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32))) +svuint32_t svsli(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64))) +svuint64_t svsli(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16))) +svuint16_t svsli(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8))) +svint8_t svsli(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32))) +svint32_t svsli(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64))) +svint64_t svsli(svint64_t, svint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16))) +svint16_t svsli(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m))) +svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m))) +svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m))) +svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m))) +svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x))) +svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x))) +svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x))) +svuint64_t svsqadd_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x))) +svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z))) +svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z))) +svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z))) +svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z))) +svuint16_t svsqadd_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m))) +svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m))) +svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m))) +svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m))) +svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x))) +svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x))) +svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x))) +svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x))) +svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z))) +svuint8_t svsqadd_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z))) +svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z))) +svuint64_t svsqadd_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z))) +svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8))) +svint8_t svsra(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32))) +svint32_t 
svsra(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64))) +svint64_t svsra(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16))) +svint16_t svsra(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8))) +svuint8_t svsra(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32))) +svuint32_t svsra(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64))) +svuint64_t svsra(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16))) +svuint16_t svsra(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8))) +svuint8_t svsri(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32))) +svuint32_t svsri(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64))) +svuint64_t svsri(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16))) +svuint16_t svsri(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8))) +svint8_t svsri(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32))) +svint32_t svsri(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64))) +svint64_t svsri(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16))) +svint16_t svsri(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32))) +svuint16_t svsubhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64))) +svuint32_t svsubhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16))) +svuint8_t svsubhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32))) +svint16_t svsubhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64))) +svint32_t svsubhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16))) +svint8_t svsubhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32))) +svuint16_t svsubhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64))) +svuint32_t svsubhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16))) +svuint8_t svsubhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32))) +svint16_t svsubhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64))) +svint32_t svsubhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16))) +svint8_t svsubhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32))) +svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64))) +svuint32_t svsubhnt(svuint32_t, 
svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16))) +svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32))) +svint16_t svsubhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64))) +svint32_t svsubhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16))) +svint8_t svsubhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32))) +svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64))) +svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16))) +svuint8_t svsubhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32))) +svint16_t svsubhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64))) +svint32_t svsubhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16))) +svint8_t svsubhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32))) +svint32_t svsublb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64))) +svint64_t svsublb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16))) +svint16_t svsublb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32))) +svuint32_t svsublb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64))) +svuint64_t svsublb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16))) +svuint16_t svsublb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32))) +svint32_t svsublb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64))) +svint64_t svsublb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16))) +svint16_t svsublb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32))) +svuint32_t svsublb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64))) +svuint64_t svsublb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16))) +svuint16_t svsublb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32))) +svint32_t svsublbt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64))) +svint64_t svsublbt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16))) +svint16_t svsublbt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32))) +svint32_t svsublbt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64))) +svint64_t svsublbt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16))) +svint16_t svsublbt(svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32))) +svint32_t svsublt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64))) +svint64_t svsublt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16))) +svint16_t svsublt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32))) +svuint32_t svsublt(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64))) +svuint64_t svsublt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16))) +svuint16_t svsublt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32))) +svint32_t svsublt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64))) +svint64_t svsublt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16))) +svint16_t svsublt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32))) +svuint32_t svsublt(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64))) +svuint64_t svsublt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16))) +svuint16_t svsublt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32))) +svint32_t svsubltb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64))) +svint64_t svsubltb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16))) +svint16_t svsubltb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32))) +svint32_t svsubltb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64))) +svint64_t svsubltb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16))) +svint16_t svsubltb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32))) +svint32_t svsubwb(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64))) +svint64_t svsubwb(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16))) +svint16_t svsubwb(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32))) +svuint32_t svsubwb(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64))) +svuint64_t svsubwb(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16))) +svuint16_t svsubwb(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32))) +svint32_t svsubwb(svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64))) +svint64_t svsubwb(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16))) +svint16_t svsubwb(svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32))) +svuint32_t svsubwb(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64))) +svuint64_t svsubwb(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16))) 
+svuint16_t svsubwb(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32))) +svint32_t svsubwt(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64))) +svint64_t svsubwt(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16))) +svint16_t svsubwt(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32))) +svuint32_t svsubwt(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64))) +svuint64_t svsubwt(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16))) +svuint16_t svsubwt(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32))) +svint32_t svsubwt(svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64))) +svint64_t svsubwt(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16))) +svint16_t svsubwt(svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32))) +svuint32_t svsubwt(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64))) +svuint64_t svsubwt(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16))) +svuint16_t svsubwt(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8))) +svuint8_t svtbl2(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32))) +svuint32_t svtbl2(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64))) +svuint64_t svtbl2(svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16))) +svuint16_t svtbl2(svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8))) +svint8_t svtbl2(svint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64))) +svfloat64_t svtbl2(svfloat64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32))) +svfloat32_t svtbl2(svfloat32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16))) +svfloat16_t svtbl2(svfloat16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32))) +svint32_t svtbl2(svint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64))) +svint64_t svtbl2(svint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16))) +svint16_t svtbl2(svint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8))) +svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32))) +svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64))) +svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16))) +svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8))) +svint8_t svtbx(svint8_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64))) +svfloat64_t 
svtbx(svfloat64_t, svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32))) +svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16))) +svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32))) +svint32_t svtbx(svint32_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64))) +svint64_t svtbx(svint64_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16))) +svint16_t svtbx(svint16_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m))) +svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m))) +svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m))) +svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m))) +svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x))) +svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x))) +svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x))) +svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x))) +svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z))) +svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z))) +svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z))) +svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z))) +svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m))) +svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m))) +svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m))) +svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m))) +svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x))) +svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x))) +svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x))) +svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x))) +svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z))) +svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z))) +svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z))) +svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z))) +svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32))) +svbool_t svwhilege_b8(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32))) +svbool_t svwhilege_b32(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32))) +svbool_t svwhilege_b64(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32))) +svbool_t svwhilege_b16(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64))) +svbool_t svwhilege_b8(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64))) +svbool_t svwhilege_b32(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64))) +svbool_t svwhilege_b64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64))) +svbool_t svwhilege_b16(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32))) +svbool_t svwhilege_b8(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32))) +svbool_t svwhilege_b32(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32))) +svbool_t svwhilege_b64(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32))) +svbool_t svwhilege_b16(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64))) +svbool_t svwhilege_b8(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64))) +svbool_t svwhilege_b32(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64))) +svbool_t svwhilege_b64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64))) +svbool_t svwhilege_b16(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32))) +svbool_t svwhilegt_b8(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32))) +svbool_t svwhilegt_b32(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32))) +svbool_t svwhilegt_b64(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32))) +svbool_t svwhilegt_b16(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64))) +svbool_t svwhilegt_b8(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64))) +svbool_t svwhilegt_b32(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64))) +svbool_t svwhilegt_b64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64))) +svbool_t svwhilegt_b16(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32))) +svbool_t svwhilegt_b8(uint32_t, uint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32))) +svbool_t svwhilegt_b32(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32))) +svbool_t svwhilegt_b64(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32))) +svbool_t svwhilegt_b16(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64))) +svbool_t svwhilegt_b8(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64))) +svbool_t svwhilegt_b32(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64))) +svbool_t svwhilegt_b64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64))) +svbool_t svwhilegt_b16(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8))) +svbool_t svwhilerw(uint8_t const *, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8))) +svbool_t svwhilerw(int8_t const *, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64))) +svbool_t svwhilerw(uint64_t const *, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64))) +svbool_t svwhilerw(float64_t const *, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64))) +svbool_t svwhilerw(int64_t const *, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16))) +svbool_t svwhilerw(uint16_t const *, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16))) +svbool_t svwhilerw(float16_t const *, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16))) +svbool_t svwhilerw(int16_t const *, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32))) +svbool_t svwhilerw(uint32_t const *, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32))) +svbool_t svwhilerw(float32_t const *, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32))) +svbool_t svwhilerw(int32_t const *, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8))) +svbool_t svwhilewr(uint8_t const *, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8))) +svbool_t svwhilewr(int8_t const *, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64))) +svbool_t svwhilewr(uint64_t const *, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64))) +svbool_t svwhilewr(float64_t const *, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64))) +svbool_t svwhilewr(int64_t const *, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16))) +svbool_t svwhilewr(uint16_t const *, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16))) +svbool_t svwhilewr(float16_t const *, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16))) +svbool_t svwhilewr(int16_t const *, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32))) +svbool_t 
svwhilewr(uint32_t const *, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32))) +svbool_t svwhilewr(float32_t const *, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32))) +svbool_t svwhilewr(int32_t const *, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8))) +svuint8_t svxar(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32))) +svuint32_t svxar(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64))) +svuint64_t svxar(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16))) +svuint16_t svxar(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8))) +svint8_t svxar(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32))) +svint32_t svxar(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64))) +svint64_t svxar(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16))) +svint16_t svxar(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m))) +svfloat64_t svabd_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m))) +svfloat32_t svabd_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m))) +svfloat16_t svabd_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x))) +svfloat64_t svabd_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x))) +svfloat32_t svabd_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x))) +svfloat16_t svabd_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z))) +svfloat64_t svabd_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z))) +svfloat32_t svabd_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z))) +svfloat16_t svabd_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m))) +svint8_t svabd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m))) +svint32_t svabd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m))) +svint64_t svabd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m))) +svint16_t svabd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x))) +svint8_t svabd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x))) +svint32_t svabd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x))) +svint64_t svabd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x))) +svint16_t svabd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z))) +svint8_t svabd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z))) +svint32_t svabd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z))) +svint64_t svabd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z))) +svint16_t svabd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m))) +svuint8_t svabd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m))) +svuint32_t svabd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m))) +svuint64_t svabd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m))) +svuint16_t svabd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x))) +svuint8_t svabd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x))) +svuint32_t svabd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x))) +svuint64_t svabd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x))) +svuint16_t svabd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z))) +svuint8_t svabd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z))) +svuint32_t svabd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z))) +svuint64_t svabd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z))) +svuint16_t svabd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m))) +svfloat64_t svabd_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m))) +svfloat32_t svabd_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m))) +svfloat16_t svabd_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x))) +svfloat64_t svabd_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x))) +svfloat32_t svabd_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x))) +svfloat16_t svabd_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z))) +svfloat64_t svabd_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z))) +svfloat32_t svabd_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z))) +svfloat16_t svabd_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m))) +svint8_t svabd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m))) +svint32_t svabd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m))) +svint64_t svabd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m))) +svint16_t svabd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x))) +svint8_t svabd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x))) +svint32_t svabd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x))) +svint64_t svabd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x))) +svint16_t svabd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z))) +svint8_t svabd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z))) +svint32_t svabd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z))) +svint64_t svabd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z))) +svint16_t svabd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m))) +svuint8_t svabd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m))) +svuint32_t svabd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m))) +svuint64_t svabd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m))) +svuint16_t svabd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x))) +svuint8_t svabd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x))) +svuint32_t svabd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x))) +svuint64_t svabd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x))) +svuint16_t svabd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z))) +svuint8_t svabd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z))) +svuint32_t svabd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z))) +svuint64_t svabd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z))) +svuint16_t svabd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m))) +svfloat64_t svabs_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m))) +svfloat32_t svabs_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m))) +svfloat16_t 
svabs_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x))) +svfloat64_t svabs_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x))) +svfloat32_t svabs_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x))) +svfloat16_t svabs_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z))) +svfloat64_t svabs_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z))) +svfloat32_t svabs_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z))) +svfloat16_t svabs_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m))) +svint8_t svabs_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m))) +svint32_t svabs_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m))) +svint64_t svabs_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m))) +svint16_t svabs_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x))) +svint8_t svabs_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x))) +svint32_t svabs_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x))) +svint64_t svabs_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x))) +svint16_t svabs_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z))) +svint8_t svabs_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z))) +svint32_t svabs_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z))) +svint64_t svabs_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z))) +svint16_t svabs_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64))) +svbool_t svacge_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32))) +svbool_t svacge_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16))) +svbool_t svacge_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64))) +svbool_t svacge_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32))) +svbool_t svacge_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16))) +svbool_t svacge_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64))) +svbool_t svacgt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32))) +svbool_t svacgt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16))) +svbool_t svacgt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64))) +svbool_t svacgt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32))) +svbool_t svacgt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16))) +svbool_t svacgt_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64))) +svbool_t svacle_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32))) +svbool_t svacle_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16))) +svbool_t svacle_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64))) +svbool_t svacle_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32))) +svbool_t svacle_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16))) +svbool_t svacle_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64))) +svbool_t svaclt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32))) +svbool_t svaclt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16))) +svbool_t svaclt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64))) +svbool_t svaclt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32))) +svbool_t svaclt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16))) +svbool_t svaclt_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m))) +svfloat64_t svadd_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m))) +svfloat32_t svadd_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m))) +svfloat16_t svadd_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x))) +svfloat64_t svadd_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x))) +svfloat32_t svadd_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x))) +svfloat16_t svadd_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z))) +svfloat64_t svadd_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z))) +svfloat32_t svadd_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z))) +svfloat16_t svadd_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m))) +svuint8_t svadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m))) +svuint32_t svadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m))) +svuint64_t svadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m))) +svuint16_t svadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m))) +svint8_t svadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m))) +svint32_t svadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m))) +svint64_t svadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m))) +svint16_t svadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x))) +svuint8_t svadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x))) +svuint32_t svadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x))) +svuint64_t svadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x))) +svuint16_t svadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x))) +svint8_t svadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x))) +svint32_t svadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x))) +svint64_t svadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x))) +svint16_t svadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z))) +svuint8_t svadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z))) +svuint32_t svadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z))) +svuint64_t svadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z))) +svuint16_t svadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z))) +svint8_t svadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z))) +svint32_t svadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z))) +svint64_t svadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z))) +svint16_t svadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m))) +svfloat64_t svadd_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m))) +svfloat32_t svadd_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m))) +svfloat16_t svadd_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x))) +svfloat64_t svadd_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x))) +svfloat32_t svadd_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x))) +svfloat16_t svadd_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z))) +svfloat64_t svadd_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z))) +svfloat32_t svadd_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z))) +svfloat16_t svadd_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m))) +svuint8_t svadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m))) +svuint32_t svadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m))) +svuint64_t svadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m))) +svuint16_t svadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m))) +svint8_t svadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m))) +svint32_t svadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m))) +svint64_t svadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m))) +svint16_t svadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x))) +svuint8_t svadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x))) +svuint32_t svadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x))) +svuint64_t svadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x))) +svuint16_t svadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x))) +svint8_t svadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x))) +svint32_t svadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x))) +svint64_t svadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x))) +svint16_t svadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z))) +svuint8_t svadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z))) +svuint32_t svadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z))) +svuint64_t svadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z))) +svuint16_t svadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z))) +svint8_t svadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z))) +svint32_t 
svadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z))) +svint64_t svadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z))) +svint16_t svadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64))) +float64_t svadda_f64(svbool_t, float64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32))) +float32_t svadda_f32(svbool_t, float32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16))) +float16_t svadda_f16(svbool_t, float16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8))) +int64_t svaddv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32))) +int64_t svaddv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64))) +int64_t svaddv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16))) +int64_t svaddv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8))) +uint64_t svaddv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32))) +uint64_t svaddv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64))) +uint64_t svaddv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16))) +uint64_t svaddv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64))) +float64_t svaddv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32))) +float32_t svaddv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16))) +float16_t svaddv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z))) +svbool_t svand_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m))) +svuint8_t svand_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m))) +svuint32_t svand_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m))) +svuint64_t svand_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m))) +svuint16_t svand_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m))) +svint8_t svand_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m))) +svint32_t svand_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m))) +svint64_t svand_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m))) +svint16_t svand_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x))) +svuint8_t svand_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x))) +svuint32_t svand_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x))) 
+svuint64_t svand_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x))) +svuint16_t svand_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x))) +svint8_t svand_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x))) +svint32_t svand_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x))) +svint64_t svand_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x))) +svint16_t svand_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z))) +svuint8_t svand_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z))) +svuint32_t svand_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z))) +svuint64_t svand_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z))) +svuint16_t svand_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z))) +svint8_t svand_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z))) +svint32_t svand_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z))) +svint64_t svand_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z))) +svint16_t svand_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m))) +svuint8_t svand_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m))) +svuint32_t svand_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m))) +svuint64_t svand_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m))) +svuint16_t svand_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m))) +svint8_t svand_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m))) +svint32_t svand_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m))) +svint64_t svand_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m))) +svint16_t svand_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x))) +svuint8_t svand_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x))) +svuint32_t svand_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x))) +svuint64_t svand_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x))) +svuint16_t svand_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x))) +svint8_t svand_s8_x(svbool_t, svint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x))) +svint32_t svand_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x))) +svint64_t svand_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x))) +svint16_t svand_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z))) +svuint8_t svand_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z))) +svuint32_t svand_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z))) +svuint64_t svand_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z))) +svuint16_t svand_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z))) +svint8_t svand_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z))) +svint32_t svand_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z))) +svint64_t svand_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z))) +svint16_t svand_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8))) +uint8_t svandv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32))) +uint32_t svandv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64))) +uint64_t svandv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16))) +uint16_t svandv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8))) +int8_t svandv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32))) +int32_t svandv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64))) +int64_t svandv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16))) +int16_t svandv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m))) +svint8_t svasr_n_s8_m(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m))) +svint32_t svasr_n_s32_m(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m))) +svint64_t svasr_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m))) +svint16_t svasr_n_s16_m(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x))) +svint8_t svasr_n_s8_x(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x))) +svint32_t svasr_n_s32_x(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x))) +svint64_t svasr_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x))) +svint16_t svasr_n_s16_x(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z))) +svint8_t 
svasr_n_s8_z(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z))) +svint32_t svasr_n_s32_z(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z))) +svint64_t svasr_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z))) +svint16_t svasr_n_s16_z(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m))) +svint8_t svasr_s8_m(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m))) +svint32_t svasr_s32_m(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m))) +svint64_t svasr_s64_m(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m))) +svint16_t svasr_s16_m(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x))) +svint8_t svasr_s8_x(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x))) +svint32_t svasr_s32_x(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x))) +svint64_t svasr_s64_x(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x))) +svint16_t svasr_s16_x(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z))) +svint8_t svasr_s8_z(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z))) +svint32_t svasr_s32_z(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z))) +svint64_t svasr_s64_z(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z))) +svint16_t svasr_s16_z(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m))) +svint8_t svasr_wide_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m))) +svint32_t svasr_wide_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m))) +svint16_t svasr_wide_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x))) +svint8_t svasr_wide_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x))) +svint32_t svasr_wide_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x))) +svint16_t svasr_wide_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z))) +svint8_t svasr_wide_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z))) +svint32_t svasr_wide_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z))) +svint16_t svasr_wide_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m))) +svint8_t svasr_wide_s8_m(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m))) +svint32_t 
svasr_wide_s32_m(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m))) +svint16_t svasr_wide_s16_m(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x))) +svint8_t svasr_wide_s8_x(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x))) +svint32_t svasr_wide_s32_x(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x))) +svint16_t svasr_wide_s16_x(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z))) +svint8_t svasr_wide_s8_z(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z))) +svint32_t svasr_wide_s32_z(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z))) +svint16_t svasr_wide_s16_z(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m))) +svint8_t svasrd_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m))) +svint32_t svasrd_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m))) +svint64_t svasrd_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m))) +svint16_t svasrd_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x))) +svint8_t svasrd_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x))) +svint32_t svasrd_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x))) +svint64_t svasrd_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x))) +svint16_t svasrd_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z))) +svint8_t svasrd_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z))) +svint32_t svasrd_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z))) +svint64_t svasrd_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z))) +svint16_t svasrd_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z))) +svbool_t svbic_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m))) +svuint8_t svbic_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m))) +svuint32_t svbic_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m))) +svuint64_t svbic_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m))) +svuint16_t svbic_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m))) +svint8_t svbic_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m))) 
+svint32_t svbic_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m))) +svint64_t svbic_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m))) +svint16_t svbic_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x))) +svuint8_t svbic_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x))) +svuint32_t svbic_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x))) +svuint64_t svbic_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x))) +svuint16_t svbic_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x))) +svint8_t svbic_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x))) +svint32_t svbic_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x))) +svint64_t svbic_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x))) +svint16_t svbic_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z))) +svuint8_t svbic_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z))) +svuint32_t svbic_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z))) +svuint64_t svbic_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z))) +svuint16_t svbic_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z))) +svint8_t svbic_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z))) +svint32_t svbic_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z))) +svint64_t svbic_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z))) +svint16_t svbic_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m))) +svuint8_t svbic_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m))) +svuint32_t svbic_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m))) +svuint64_t svbic_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m))) +svuint16_t svbic_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m))) +svint8_t svbic_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m))) +svint32_t svbic_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m))) +svint64_t svbic_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m))) +svint16_t svbic_s16_m(svbool_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x))) +svuint8_t svbic_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x))) +svuint32_t svbic_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x))) +svuint64_t svbic_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x))) +svuint16_t svbic_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x))) +svint8_t svbic_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x))) +svint32_t svbic_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x))) +svint64_t svbic_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x))) +svint16_t svbic_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z))) +svuint8_t svbic_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z))) +svuint32_t svbic_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z))) +svuint64_t svbic_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z))) +svuint16_t svbic_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z))) +svint8_t svbic_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z))) +svint32_t svbic_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z))) +svint64_t svbic_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z))) +svint16_t svbic_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m))) +svbool_t svbrka_b_m(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z))) +svbool_t svbrka_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m))) +svbool_t svbrkb_b_m(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z))) +svbool_t svbrkb_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z))) +svbool_t svbrkn_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z))) +svbool_t svbrkpa_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z))) +svbool_t svbrkpb_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m))) +svfloat64_t svcadd_f64_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m))) +svfloat32_t svcadd_f32_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m))) +svfloat16_t svcadd_f16_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x))) +svfloat64_t svcadd_f64_x(svbool_t, 
svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x))) +svfloat32_t svcadd_f32_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x))) +svfloat16_t svcadd_f16_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z))) +svfloat64_t svcadd_f64_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z))) +svfloat32_t svcadd_f32_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z))) +svfloat16_t svcadd_f16_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8))) +uint8_t svclasta_n_u8(svbool_t, uint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32))) +uint32_t svclasta_n_u32(svbool_t, uint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64))) +uint64_t svclasta_n_u64(svbool_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16))) +uint16_t svclasta_n_u16(svbool_t, uint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8))) +int8_t svclasta_n_s8(svbool_t, int8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64))) +float64_t svclasta_n_f64(svbool_t, float64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32))) +float32_t svclasta_n_f32(svbool_t, float32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16))) +float16_t svclasta_n_f16(svbool_t, float16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32))) +int32_t svclasta_n_s32(svbool_t, int32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64))) +int64_t svclasta_n_s64(svbool_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16))) +int16_t svclasta_n_s16(svbool_t, int16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8))) +svuint8_t svclasta_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32))) +svuint32_t svclasta_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64))) +svuint64_t svclasta_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16))) +svuint16_t svclasta_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8))) +svint8_t svclasta_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64))) +svfloat64_t svclasta_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32))) +svfloat32_t svclasta_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16))) +svfloat16_t svclasta_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32))) +svint32_t svclasta_s32(svbool_t, svint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64))) +svint64_t svclasta_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16))) +svint16_t svclasta_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8))) +uint8_t svclastb_n_u8(svbool_t, uint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32))) +uint32_t svclastb_n_u32(svbool_t, uint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64))) +uint64_t svclastb_n_u64(svbool_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16))) +uint16_t svclastb_n_u16(svbool_t, uint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8))) +int8_t svclastb_n_s8(svbool_t, int8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64))) +float64_t svclastb_n_f64(svbool_t, float64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32))) +float32_t svclastb_n_f32(svbool_t, float32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16))) +float16_t svclastb_n_f16(svbool_t, float16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32))) +int32_t svclastb_n_s32(svbool_t, int32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64))) +int64_t svclastb_n_s64(svbool_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16))) +int16_t svclastb_n_s16(svbool_t, int16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8))) +svuint8_t svclastb_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32))) +svuint32_t svclastb_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64))) +svuint64_t svclastb_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16))) +svuint16_t svclastb_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8))) +svint8_t svclastb_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64))) +svfloat64_t svclastb_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32))) +svfloat32_t svclastb_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16))) +svfloat16_t svclastb_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32))) +svint32_t svclastb_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64))) +svint64_t svclastb_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16))) +svint16_t svclastb_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m))) +svuint8_t svcls_s8_m(svuint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m))) +svuint32_t svcls_s32_m(svuint32_t, svbool_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m))) +svuint64_t svcls_s64_m(svuint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m))) +svuint16_t svcls_s16_m(svuint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x))) +svuint8_t svcls_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x))) +svuint32_t svcls_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x))) +svuint64_t svcls_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x))) +svuint16_t svcls_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z))) +svuint8_t svcls_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z))) +svuint32_t svcls_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z))) +svuint64_t svcls_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z))) +svuint16_t svcls_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m))) +svuint8_t svclz_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m))) +svuint32_t svclz_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m))) +svuint64_t svclz_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m))) +svuint16_t svclz_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m))) +svuint8_t svclz_s8_m(svuint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m))) +svuint32_t svclz_s32_m(svuint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m))) +svuint64_t svclz_s64_m(svuint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m))) +svuint16_t svclz_s16_m(svuint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x))) +svuint8_t svclz_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x))) +svuint32_t svclz_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x))) +svuint64_t svclz_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x))) +svuint16_t svclz_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x))) +svuint8_t svclz_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x))) +svuint32_t svclz_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x))) +svuint64_t svclz_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x))) +svuint16_t svclz_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z))) +svuint8_t svclz_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z))) +svuint32_t svclz_u32_z(svbool_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z))) +svuint64_t svclz_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z))) +svuint16_t svclz_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z))) +svuint8_t svclz_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z))) +svuint32_t svclz_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z))) +svuint64_t svclz_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z))) +svuint16_t svclz_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m))) +svfloat64_t svcmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m))) +svfloat32_t svcmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m))) +svfloat16_t svcmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x))) +svfloat64_t svcmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x))) +svfloat32_t svcmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x))) +svfloat16_t svcmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z))) +svfloat64_t svcmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z))) +svfloat32_t svcmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z))) +svfloat16_t svcmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32))) +svfloat32_t svcmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16))) +svfloat16_t svcmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64))) +svbool_t svcmpeq_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32))) +svbool_t svcmpeq_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16))) +svbool_t svcmpeq_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8))) +svbool_t svcmpeq_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32))) +svbool_t svcmpeq_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64))) +svbool_t svcmpeq_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16))) +svbool_t svcmpeq_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8))) 
+svbool_t svcmpeq_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32))) +svbool_t svcmpeq_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64))) +svbool_t svcmpeq_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16))) +svbool_t svcmpeq_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8))) +svbool_t svcmpeq_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32))) +svbool_t svcmpeq_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64))) +svbool_t svcmpeq_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16))) +svbool_t svcmpeq_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8))) +svbool_t svcmpeq_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32))) +svbool_t svcmpeq_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64))) +svbool_t svcmpeq_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16))) +svbool_t svcmpeq_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64))) +svbool_t svcmpeq_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32))) +svbool_t svcmpeq_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16))) +svbool_t svcmpeq_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8))) +svbool_t svcmpeq_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32))) +svbool_t svcmpeq_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16))) +svbool_t svcmpeq_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8))) +svbool_t svcmpeq_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32))) +svbool_t svcmpeq_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16))) +svbool_t svcmpeq_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64))) +svbool_t svcmpge_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32))) +svbool_t svcmpge_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16))) +svbool_t svcmpge_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8))) +svbool_t svcmpge_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32))) +svbool_t svcmpge_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64))) +svbool_t svcmpge_n_s64(svbool_t, svint64_t, 
int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16))) +svbool_t svcmpge_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8))) +svbool_t svcmpge_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32))) +svbool_t svcmpge_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64))) +svbool_t svcmpge_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16))) +svbool_t svcmpge_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8))) +svbool_t svcmpge_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32))) +svbool_t svcmpge_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64))) +svbool_t svcmpge_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16))) +svbool_t svcmpge_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64))) +svbool_t svcmpge_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32))) +svbool_t svcmpge_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16))) +svbool_t svcmpge_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8))) +svbool_t svcmpge_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32))) +svbool_t svcmpge_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64))) +svbool_t svcmpge_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16))) +svbool_t svcmpge_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8))) +svbool_t svcmpge_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32))) +svbool_t svcmpge_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16))) +svbool_t svcmpge_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8))) +svbool_t svcmpge_wide_n_u8(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32))) +svbool_t svcmpge_wide_n_u32(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16))) +svbool_t svcmpge_wide_n_u16(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8))) +svbool_t svcmpge_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32))) +svbool_t svcmpge_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16))) +svbool_t svcmpge_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8))) +svbool_t svcmpge_wide_u8(svbool_t, svuint8_t, svuint64_t); 
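
The declarations in this hunk are the predicated ACLE SVE intrinsics the vendored header exposes (svand_*, svasr_*, svbic_*, svcmp*_*, svcnot_*, svcnt*_*, svcreate*_*, svcvt_*). As a minimal, illustrative sketch of how such declarations are meant to be used, and not part of the header or of this diff, the following assumes an AArch64 target built with SVE enabled (for example -march=armv8-a+sve); the helper name and the mask parameter are hypothetical:

#include <arm_sve.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative only: AND each element of src with a scalar mask and count
 * how many of the results are zero, using intrinsics declared in this
 * header (svcntw, svwhilelt_b32_u64, svld1_u32, svand_n_u32_x,
 * svcmpeq_n_u32, svcntp_b32). */
static uint64_t and_then_count_zeros(const uint32_t *src, uint32_t mask, size_t n)
{
    uint64_t zeros = 0;
    for (size_t i = 0; i < n; i += svcntw()) {
        svbool_t pg = svwhilelt_b32_u64(i, n);      /* lane k active while i+k < n */
        svuint32_t v = svld1_u32(pg, src + i);      /* predicated load             */
        svuint32_t w = svand_n_u32_x(pg, v, mask);  /* _x: inactive lanes undefined */
        svbool_t z = svcmpeq_n_u32(pg, w, 0);       /* per-lane compare            */
        zeros += svcntp_b32(pg, z);                 /* count true active lanes     */
    }
    return zeros;
}

The suffix pattern visible throughout the declarations follows the ACLE convention: _m variants merge inactive lanes from the first vector operand, _z variants zero them, _x variants leave them unspecified, and _n variants take a scalar in place of the last vector operand.
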
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32))) +svbool_t svcmpge_wide_u32(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16))) +svbool_t svcmpge_wide_u16(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64))) +svbool_t svcmpgt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32))) +svbool_t svcmpgt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16))) +svbool_t svcmpgt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8))) +svbool_t svcmpgt_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32))) +svbool_t svcmpgt_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64))) +svbool_t svcmpgt_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16))) +svbool_t svcmpgt_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8))) +svbool_t svcmpgt_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32))) +svbool_t svcmpgt_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64))) +svbool_t svcmpgt_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16))) +svbool_t svcmpgt_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8))) +svbool_t svcmpgt_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32))) +svbool_t svcmpgt_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64))) +svbool_t svcmpgt_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16))) +svbool_t svcmpgt_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64))) +svbool_t svcmpgt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32))) +svbool_t svcmpgt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16))) +svbool_t svcmpgt_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8))) +svbool_t svcmpgt_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32))) +svbool_t svcmpgt_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64))) +svbool_t svcmpgt_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16))) +svbool_t svcmpgt_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8))) +svbool_t svcmpgt_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32))) +svbool_t svcmpgt_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16))) +svbool_t svcmpgt_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8))) +svbool_t svcmpgt_wide_n_u8(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32))) +svbool_t svcmpgt_wide_n_u32(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16))) +svbool_t svcmpgt_wide_n_u16(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8))) +svbool_t svcmpgt_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32))) +svbool_t svcmpgt_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16))) +svbool_t svcmpgt_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8))) +svbool_t svcmpgt_wide_u8(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32))) +svbool_t svcmpgt_wide_u32(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16))) +svbool_t svcmpgt_wide_u16(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64))) +svbool_t svcmple_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32))) +svbool_t svcmple_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16))) +svbool_t svcmple_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8))) +svbool_t svcmple_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32))) +svbool_t svcmple_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64))) +svbool_t svcmple_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16))) +svbool_t svcmple_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8))) +svbool_t svcmple_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32))) +svbool_t svcmple_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64))) +svbool_t svcmple_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16))) +svbool_t svcmple_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8))) +svbool_t svcmple_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32))) +svbool_t svcmple_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64))) +svbool_t svcmple_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16))) +svbool_t svcmple_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64))) +svbool_t svcmple_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32))) +svbool_t svcmple_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16))) +svbool_t svcmple_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8))) +svbool_t svcmple_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32))) +svbool_t svcmple_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64))) +svbool_t svcmple_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16))) +svbool_t svcmple_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8))) +svbool_t svcmple_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32))) +svbool_t svcmple_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16))) +svbool_t svcmple_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8))) +svbool_t svcmple_wide_n_u8(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32))) +svbool_t svcmple_wide_n_u32(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16))) +svbool_t svcmple_wide_n_u16(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8))) +svbool_t svcmple_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32))) +svbool_t svcmple_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16))) +svbool_t svcmple_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8))) +svbool_t svcmple_wide_u8(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32))) +svbool_t svcmple_wide_u32(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16))) +svbool_t svcmple_wide_u16(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8))) +svbool_t svcmplt_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32))) +svbool_t svcmplt_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64))) +svbool_t svcmplt_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16))) +svbool_t svcmplt_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64))) +svbool_t svcmplt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32))) +svbool_t svcmplt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16))) +svbool_t svcmplt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8))) +svbool_t svcmplt_n_s8(svbool_t, 
svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32))) +svbool_t svcmplt_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64))) +svbool_t svcmplt_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16))) +svbool_t svcmplt_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8))) +svbool_t svcmplt_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32))) +svbool_t svcmplt_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64))) +svbool_t svcmplt_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16))) +svbool_t svcmplt_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8))) +svbool_t svcmplt_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32))) +svbool_t svcmplt_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64))) +svbool_t svcmplt_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16))) +svbool_t svcmplt_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64))) +svbool_t svcmplt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32))) +svbool_t svcmplt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16))) +svbool_t svcmplt_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8))) +svbool_t svcmplt_wide_n_u8(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32))) +svbool_t svcmplt_wide_n_u32(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16))) +svbool_t svcmplt_wide_n_u16(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8))) +svbool_t svcmplt_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32))) +svbool_t svcmplt_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16))) +svbool_t svcmplt_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8))) +svbool_t svcmplt_wide_u8(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32))) +svbool_t svcmplt_wide_u32(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16))) +svbool_t svcmplt_wide_u16(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8))) +svbool_t svcmplt_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32))) +svbool_t svcmplt_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16))) +svbool_t svcmplt_wide_s16(svbool_t, 
svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64))) +svbool_t svcmpne_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32))) +svbool_t svcmpne_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16))) +svbool_t svcmpne_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8))) +svbool_t svcmpne_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32))) +svbool_t svcmpne_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64))) +svbool_t svcmpne_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16))) +svbool_t svcmpne_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8))) +svbool_t svcmpne_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32))) +svbool_t svcmpne_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64))) +svbool_t svcmpne_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16))) +svbool_t svcmpne_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8))) +svbool_t svcmpne_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32))) +svbool_t svcmpne_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64))) +svbool_t svcmpne_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16))) +svbool_t svcmpne_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8))) +svbool_t svcmpne_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32))) +svbool_t svcmpne_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64))) +svbool_t svcmpne_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16))) +svbool_t svcmpne_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64))) +svbool_t svcmpne_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32))) +svbool_t svcmpne_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16))) +svbool_t svcmpne_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8))) +svbool_t svcmpne_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32))) +svbool_t svcmpne_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16))) +svbool_t svcmpne_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8))) +svbool_t svcmpne_wide_s8(svbool_t, svint8_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32))) +svbool_t svcmpne_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16))) +svbool_t svcmpne_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64))) +svbool_t svcmpuo_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32))) +svbool_t svcmpuo_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16))) +svbool_t svcmpuo_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64))) +svbool_t svcmpuo_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32))) +svbool_t svcmpuo_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16))) +svbool_t svcmpuo_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m))) +svuint8_t svcnot_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m))) +svuint32_t svcnot_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m))) +svuint64_t svcnot_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m))) +svuint16_t svcnot_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m))) +svint8_t svcnot_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m))) +svint32_t svcnot_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m))) +svint64_t svcnot_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m))) +svint16_t svcnot_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x))) +svuint8_t svcnot_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x))) +svuint32_t svcnot_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x))) +svuint64_t svcnot_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x))) +svuint16_t svcnot_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x))) +svint8_t svcnot_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x))) +svint32_t svcnot_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x))) +svint64_t svcnot_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x))) +svint16_t svcnot_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z))) +svuint8_t svcnot_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z))) +svuint32_t svcnot_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z))) +svuint64_t svcnot_u64_z(svbool_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z))) +svuint16_t svcnot_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z))) +svint8_t svcnot_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z))) +svint32_t svcnot_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z))) +svint64_t svcnot_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z))) +svint16_t svcnot_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m))) +svuint8_t svcnt_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m))) +svuint32_t svcnt_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m))) +svuint64_t svcnt_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m))) +svuint16_t svcnt_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m))) +svuint8_t svcnt_s8_m(svuint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m))) +svuint64_t svcnt_f64_m(svuint64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m))) +svuint32_t svcnt_f32_m(svuint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m))) +svuint16_t svcnt_f16_m(svuint16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m))) +svuint32_t svcnt_s32_m(svuint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m))) +svuint64_t svcnt_s64_m(svuint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m))) +svuint16_t svcnt_s16_m(svuint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x))) +svuint8_t svcnt_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x))) +svuint32_t svcnt_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x))) +svuint64_t svcnt_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x))) +svuint16_t svcnt_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x))) +svuint8_t svcnt_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x))) +svuint64_t svcnt_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x))) +svuint32_t svcnt_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x))) +svuint16_t svcnt_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x))) +svuint32_t svcnt_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x))) +svuint64_t svcnt_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x))) +svuint16_t svcnt_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z))) +svuint8_t svcnt_u8_z(svbool_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z))) +svuint32_t svcnt_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z))) +svuint64_t svcnt_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z))) +svuint16_t svcnt_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z))) +svuint8_t svcnt_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z))) +svuint64_t svcnt_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z))) +svuint32_t svcnt_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z))) +svuint16_t svcnt_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z))) +svuint32_t svcnt_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z))) +svuint64_t svcnt_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z))) +svuint16_t svcnt_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb))) +uint64_t svcntb(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb_pat))) +uint64_t svcntb_pat(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd))) +uint64_t svcntd(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd_pat))) +uint64_t svcntd_pat(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth))) +uint64_t svcnth(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth_pat))) +uint64_t svcnth_pat(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b8))) +uint64_t svcntp_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b32))) +uint64_t svcntp_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b64))) +uint64_t svcntp_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b16))) +uint64_t svcntp_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw))) +uint64_t svcntw(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw_pat))) +uint64_t svcntw_pat(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8))) +svuint8x2_t svcreate2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32))) +svuint32x2_t svcreate2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64))) +svuint64x2_t svcreate2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16))) +svuint16x2_t svcreate2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8))) +svint8x2_t svcreate2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64))) +svfloat64x2_t svcreate2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32))) +svfloat32x2_t svcreate2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16))) +svfloat16x2_t svcreate2_f16(svfloat16_t, 
svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32))) +svint32x2_t svcreate2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64))) +svint64x2_t svcreate2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16))) +svint16x2_t svcreate2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8))) +svuint8x3_t svcreate3_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32))) +svuint32x3_t svcreate3_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64))) +svuint64x3_t svcreate3_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16))) +svuint16x3_t svcreate3_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8))) +svint8x3_t svcreate3_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64))) +svfloat64x3_t svcreate3_f64(svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32))) +svfloat32x3_t svcreate3_f32(svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16))) +svfloat16x3_t svcreate3_f16(svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32))) +svint32x3_t svcreate3_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64))) +svint64x3_t svcreate3_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16))) +svint16x3_t svcreate3_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8))) +svuint8x4_t svcreate4_u8(svuint8_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32))) +svuint32x4_t svcreate4_u32(svuint32_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64))) +svuint64x4_t svcreate4_u64(svuint64_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16))) +svuint16x4_t svcreate4_u16(svuint16_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8))) +svint8x4_t svcreate4_s8(svint8_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64))) +svfloat64x4_t svcreate4_f64(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32))) +svfloat32x4_t svcreate4_f32(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16))) +svfloat16x4_t svcreate4_f16(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32))) +svint32x4_t svcreate4_s32(svint32_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64))) +svint64x4_t svcreate4_s64(svint64_t, svint64_t, svint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16))) +svint16x4_t svcreate4_s16(svint16_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m))) +svfloat16_t svcvt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x))) +svfloat16_t svcvt_f16_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z))) +svfloat16_t svcvt_f16_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m))) +svfloat16_t svcvt_f16_f64_m(svfloat16_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x))) +svfloat16_t svcvt_f16_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z))) +svfloat16_t svcvt_f16_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m))) +svfloat16_t svcvt_f16_s16_m(svfloat16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x))) +svfloat16_t svcvt_f16_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z))) +svfloat16_t svcvt_f16_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m))) +svfloat16_t svcvt_f16_s32_m(svfloat16_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x))) +svfloat16_t svcvt_f16_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z))) +svfloat16_t svcvt_f16_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m))) +svfloat16_t svcvt_f16_s64_m(svfloat16_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x))) +svfloat16_t svcvt_f16_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z))) +svfloat16_t svcvt_f16_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m))) +svfloat16_t svcvt_f16_u16_m(svfloat16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x))) +svfloat16_t svcvt_f16_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z))) +svfloat16_t svcvt_f16_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m))) +svfloat16_t svcvt_f16_u32_m(svfloat16_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x))) +svfloat16_t svcvt_f16_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z))) +svfloat16_t svcvt_f16_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m))) +svfloat16_t svcvt_f16_u64_m(svfloat16_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x))) +svfloat16_t svcvt_f16_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z))) +svfloat16_t svcvt_f16_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m))) +svfloat32_t svcvt_f32_f16_m(svfloat32_t, svbool_t, 
svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x))) +svfloat32_t svcvt_f32_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z))) +svfloat32_t svcvt_f32_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m))) +svfloat32_t svcvt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x))) +svfloat32_t svcvt_f32_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z))) +svfloat32_t svcvt_f32_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m))) +svfloat32_t svcvt_f32_s32_m(svfloat32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x))) +svfloat32_t svcvt_f32_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z))) +svfloat32_t svcvt_f32_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m))) +svfloat32_t svcvt_f32_s64_m(svfloat32_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x))) +svfloat32_t svcvt_f32_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z))) +svfloat32_t svcvt_f32_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m))) +svfloat32_t svcvt_f32_u32_m(svfloat32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x))) +svfloat32_t svcvt_f32_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z))) +svfloat32_t svcvt_f32_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m))) +svfloat32_t svcvt_f32_u64_m(svfloat32_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x))) +svfloat32_t svcvt_f32_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z))) +svfloat32_t svcvt_f32_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m))) +svfloat64_t svcvt_f64_f16_m(svfloat64_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x))) +svfloat64_t svcvt_f64_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z))) +svfloat64_t svcvt_f64_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m))) +svfloat64_t svcvt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x))) +svfloat64_t svcvt_f64_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z))) +svfloat64_t svcvt_f64_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m))) +svfloat64_t svcvt_f64_s32_m(svfloat64_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x))) +svfloat64_t svcvt_f64_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z))) +svfloat64_t svcvt_f64_s32_z(svbool_t, svint32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m))) +svfloat64_t svcvt_f64_s64_m(svfloat64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x))) +svfloat64_t svcvt_f64_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z))) +svfloat64_t svcvt_f64_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m))) +svfloat64_t svcvt_f64_u32_m(svfloat64_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x))) +svfloat64_t svcvt_f64_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z))) +svfloat64_t svcvt_f64_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m))) +svfloat64_t svcvt_f64_u64_m(svfloat64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x))) +svfloat64_t svcvt_f64_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z))) +svfloat64_t svcvt_f64_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m))) +svint16_t svcvt_s16_f16_m(svint16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x))) +svint16_t svcvt_s16_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z))) +svint16_t svcvt_s16_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m))) +svint32_t svcvt_s32_f16_m(svint32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x))) +svint32_t svcvt_s32_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z))) +svint32_t svcvt_s32_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m))) +svint32_t svcvt_s32_f32_m(svint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x))) +svint32_t svcvt_s32_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z))) +svint32_t svcvt_s32_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m))) +svint32_t svcvt_s32_f64_m(svint32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x))) +svint32_t svcvt_s32_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z))) +svint32_t svcvt_s32_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m))) +svint64_t svcvt_s64_f16_m(svint64_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x))) +svint64_t svcvt_s64_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z))) +svint64_t svcvt_s64_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m))) +svint64_t svcvt_s64_f32_m(svint64_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x))) +svint64_t svcvt_s64_f32_x(svbool_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z))) +svint64_t svcvt_s64_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m))) +svint64_t svcvt_s64_f64_m(svint64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x))) +svint64_t svcvt_s64_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z))) +svint64_t svcvt_s64_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m))) +svuint16_t svcvt_u16_f16_m(svuint16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x))) +svuint16_t svcvt_u16_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z))) +svuint16_t svcvt_u16_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m))) +svuint32_t svcvt_u32_f16_m(svuint32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x))) +svuint32_t svcvt_u32_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z))) +svuint32_t svcvt_u32_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m))) +svuint32_t svcvt_u32_f32_m(svuint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x))) +svuint32_t svcvt_u32_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z))) +svuint32_t svcvt_u32_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m))) +svuint32_t svcvt_u32_f64_m(svuint32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x))) +svuint32_t svcvt_u32_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z))) +svuint32_t svcvt_u32_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m))) +svuint64_t svcvt_u64_f16_m(svuint64_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x))) +svuint64_t svcvt_u64_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z))) +svuint64_t svcvt_u64_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m))) +svuint64_t svcvt_u64_f32_m(svuint64_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x))) +svuint64_t svcvt_u64_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z))) +svuint64_t svcvt_u64_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m))) +svuint64_t svcvt_u64_f64_m(svuint64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x))) +svuint64_t svcvt_u64_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z))) +svuint64_t svcvt_u64_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m))) +svfloat64_t svdiv_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m))) +svfloat32_t svdiv_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m))) +svfloat16_t svdiv_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x))) +svfloat64_t svdiv_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x))) +svfloat32_t svdiv_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x))) +svfloat16_t svdiv_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z))) +svfloat64_t svdiv_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z))) +svfloat32_t svdiv_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z))) +svfloat16_t svdiv_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m))) +svint32_t svdiv_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m))) +svint64_t svdiv_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x))) +svint32_t svdiv_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x))) +svint64_t svdiv_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z))) +svint32_t svdiv_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z))) +svint64_t svdiv_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m))) +svuint32_t svdiv_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m))) +svuint64_t svdiv_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x))) +svuint32_t svdiv_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x))) +svuint64_t svdiv_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z))) +svuint32_t svdiv_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z))) +svuint64_t svdiv_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m))) +svfloat64_t svdiv_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m))) +svfloat32_t svdiv_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m))) +svfloat16_t svdiv_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x))) +svfloat64_t svdiv_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x))) +svfloat32_t svdiv_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x))) +svfloat16_t svdiv_f16_x(svbool_t, svfloat16_t, 
svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z))) +svfloat64_t svdiv_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z))) +svfloat32_t svdiv_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z))) +svfloat16_t svdiv_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m))) +svint32_t svdiv_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m))) +svint64_t svdiv_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x))) +svint32_t svdiv_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x))) +svint64_t svdiv_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z))) +svint32_t svdiv_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z))) +svint64_t svdiv_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m))) +svuint32_t svdiv_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m))) +svuint64_t svdiv_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x))) +svuint32_t svdiv_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x))) +svuint64_t svdiv_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z))) +svuint32_t svdiv_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z))) +svuint64_t svdiv_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m))) +svfloat64_t svdivr_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m))) +svfloat32_t svdivr_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m))) +svfloat16_t svdivr_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x))) +svfloat64_t svdivr_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x))) +svfloat32_t svdivr_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x))) +svfloat16_t svdivr_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z))) +svfloat64_t svdivr_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z))) +svfloat32_t svdivr_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z))) +svfloat16_t svdivr_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m))) +svint32_t svdivr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m))) +svint64_t 
svdivr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x))) +svint32_t svdivr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x))) +svint64_t svdivr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z))) +svint32_t svdivr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z))) +svint64_t svdivr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m))) +svuint32_t svdivr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m))) +svuint64_t svdivr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x))) +svuint32_t svdivr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x))) +svuint64_t svdivr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z))) +svuint32_t svdivr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z))) +svuint64_t svdivr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m))) +svfloat64_t svdivr_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m))) +svfloat32_t svdivr_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m))) +svfloat16_t svdivr_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x))) +svfloat64_t svdivr_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x))) +svfloat32_t svdivr_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x))) +svfloat16_t svdivr_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z))) +svfloat64_t svdivr_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z))) +svfloat32_t svdivr_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z))) +svfloat16_t svdivr_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m))) +svint32_t svdivr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m))) +svint64_t svdivr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x))) +svint32_t svdivr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x))) +svint64_t svdivr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z))) +svint32_t svdivr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z))) +svint64_t svdivr_s64_z(svbool_t, svint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m))) +svuint32_t svdivr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m))) +svuint64_t svdivr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x))) +svuint32_t svdivr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x))) +svuint64_t svdivr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z))) +svuint32_t svdivr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z))) +svuint64_t svdivr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32))) +svint32_t svdot_n_s32(svint32_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64))) +svint64_t svdot_n_s64(svint64_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32))) +svuint32_t svdot_n_u32(svuint32_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64))) +svuint64_t svdot_n_u64(svuint64_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32))) +svint32_t svdot_s32(svint32_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64))) +svint64_t svdot_s64(svint64_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32))) +svuint32_t svdot_u32(svuint32_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64))) +svuint64_t svdot_u64(svuint64_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32))) +svint32_t svdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64))) +svint64_t svdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32))) +svuint32_t svdot_lane_u32(svuint32_t, svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64))) +svuint64_t svdot_lane_u64(svuint64_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8))) +svuint8_t svdup_n_u8(uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32))) +svuint32_t svdup_n_u32(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64))) +svuint64_t svdup_n_u64(uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16))) +svuint16_t svdup_n_u16(uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8))) +svint8_t svdup_n_s8(int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64))) +svfloat64_t svdup_n_f64(float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32))) +svfloat32_t svdup_n_f32(float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16))) +svfloat16_t svdup_n_f16(float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32))) +svint32_t svdup_n_s32(int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64))) +svint64_t 
svdup_n_s64(int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16))) +svint16_t svdup_n_s16(int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m))) +svuint8_t svdup_n_u8_m(svuint8_t, svbool_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m))) +svuint32_t svdup_n_u32_m(svuint32_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m))) +svuint64_t svdup_n_u64_m(svuint64_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m))) +svuint16_t svdup_n_u16_m(svuint16_t, svbool_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m))) +svint8_t svdup_n_s8_m(svint8_t, svbool_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m))) +svfloat64_t svdup_n_f64_m(svfloat64_t, svbool_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m))) +svfloat32_t svdup_n_f32_m(svfloat32_t, svbool_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m))) +svfloat16_t svdup_n_f16_m(svfloat16_t, svbool_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m))) +svint32_t svdup_n_s32_m(svint32_t, svbool_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m))) +svint64_t svdup_n_s64_m(svint64_t, svbool_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m))) +svint16_t svdup_n_s16_m(svint16_t, svbool_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8))) +svbool_t svdup_n_b8(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32))) +svbool_t svdup_n_b32(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64))) +svbool_t svdup_n_b64(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16))) +svbool_t svdup_n_b16(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x))) +svuint8_t svdup_n_u8_x(svbool_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x))) +svuint32_t svdup_n_u32_x(svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x))) +svuint64_t svdup_n_u64_x(svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x))) +svuint16_t svdup_n_u16_x(svbool_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x))) +svint8_t svdup_n_s8_x(svbool_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x))) +svfloat64_t svdup_n_f64_x(svbool_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x))) +svfloat32_t svdup_n_f32_x(svbool_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x))) +svfloat16_t svdup_n_f16_x(svbool_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x))) +svint32_t svdup_n_s32_x(svbool_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x))) +svint64_t svdup_n_s64_x(svbool_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x))) +svint16_t svdup_n_s16_x(svbool_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z))) +svuint8_t svdup_n_u8_z(svbool_t, uint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z))) +svuint32_t svdup_n_u32_z(svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z))) +svuint64_t svdup_n_u64_z(svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z))) +svuint16_t svdup_n_u16_z(svbool_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z))) +svint8_t svdup_n_s8_z(svbool_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z))) +svfloat64_t svdup_n_f64_z(svbool_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z))) +svfloat32_t svdup_n_f32_z(svbool_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z))) +svfloat16_t svdup_n_f16_z(svbool_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z))) +svint32_t svdup_n_s32_z(svbool_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z))) +svint64_t svdup_n_s64_z(svbool_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z))) +svint16_t svdup_n_s16_z(svbool_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8))) +svuint8_t svdup_lane_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32))) +svuint32_t svdup_lane_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64))) +svuint64_t svdup_lane_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16))) +svuint16_t svdup_lane_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8))) +svint8_t svdup_lane_s8(svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64))) +svfloat64_t svdup_lane_f64(svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32))) +svfloat32_t svdup_lane_f32(svfloat32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16))) +svfloat16_t svdup_lane_f16(svfloat16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32))) +svint32_t svdup_lane_s32(svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64))) +svint64_t svdup_lane_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16))) +svint16_t svdup_lane_s16(svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8))) +svuint8_t svdupq_n_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8))) +svint8_t svdupq_n_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16))) +svuint16_t svdupq_n_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16))) +svfloat16_t svdupq_n_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16))) +svint16_t svdupq_n_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32))) +svuint32_t svdupq_n_u32(uint32_t, uint32_t, uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32))) +svfloat32_t svdupq_n_f32(float32_t, float32_t, float32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32))) +svint32_t svdupq_n_s32(int32_t, int32_t, int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64))) +svuint64_t svdupq_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64))) +svfloat64_t svdupq_n_f64(float64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64))) +svint64_t svdupq_n_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8))) +svbool_t svdupq_n_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16))) +svbool_t svdupq_n_b16(bool, bool, bool, bool, bool, bool, bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32))) +svbool_t svdupq_n_b32(bool, bool, bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64))) +svbool_t svdupq_n_b64(bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8))) +svuint8_t svdupq_lane_u8(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32))) +svuint32_t svdupq_lane_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64))) +svuint64_t svdupq_lane_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16))) +svuint16_t svdupq_lane_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8))) +svint8_t svdupq_lane_s8(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64))) +svfloat64_t svdupq_lane_f64(svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32))) +svfloat32_t svdupq_lane_f32(svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16))) +svfloat16_t svdupq_lane_f16(svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32))) +svint32_t svdupq_lane_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64))) +svint64_t svdupq_lane_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16))) +svint16_t svdupq_lane_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z))) +svbool_t sveor_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m))) +svuint8_t sveor_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m))) +svuint32_t sveor_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m))) +svuint64_t sveor_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m))) +svuint16_t sveor_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m))) +svint8_t sveor_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m))) +svint32_t sveor_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m))) +svint64_t sveor_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m))) +svint16_t sveor_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x))) +svuint8_t sveor_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x))) +svuint32_t sveor_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x))) +svuint64_t sveor_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x))) +svuint16_t sveor_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x))) +svint8_t sveor_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x))) +svint32_t sveor_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x))) +svint64_t sveor_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x))) +svint16_t sveor_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z))) +svuint8_t sveor_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z))) +svuint32_t sveor_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z))) +svuint64_t sveor_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z))) +svuint16_t sveor_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z))) +svint8_t sveor_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z))) +svint32_t sveor_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z))) +svint64_t sveor_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z))) +svint16_t sveor_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m))) +svuint8_t sveor_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m))) +svuint32_t sveor_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m))) +svuint64_t sveor_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m))) +svuint16_t sveor_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m))) +svint8_t sveor_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m))) 
+svint32_t sveor_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m))) +svint64_t sveor_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m))) +svint16_t sveor_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x))) +svuint8_t sveor_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x))) +svuint32_t sveor_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x))) +svuint64_t sveor_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x))) +svuint16_t sveor_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x))) +svint8_t sveor_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x))) +svint32_t sveor_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x))) +svint64_t sveor_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x))) +svint16_t sveor_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z))) +svuint8_t sveor_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z))) +svuint32_t sveor_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z))) +svuint64_t sveor_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z))) +svuint16_t sveor_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z))) +svint8_t sveor_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z))) +svint32_t sveor_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z))) +svint64_t sveor_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z))) +svint16_t sveor_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8))) +uint8_t sveorv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32))) +uint32_t sveorv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64))) +uint64_t sveorv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16))) +uint16_t sveorv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8))) +int8_t sveorv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32))) +int32_t sveorv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64))) +int64_t sveorv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16))) +int16_t sveorv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8))) +svuint8_t svext_u8(svuint8_t, svuint8_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32))) +svuint32_t svext_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64))) +svuint64_t svext_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16))) +svuint16_t svext_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8))) +svint8_t svext_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64))) +svfloat64_t svext_f64(svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32))) +svfloat32_t svext_f32(svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16))) +svfloat16_t svext_f16(svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32))) +svint32_t svext_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64))) +svint64_t svext_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16))) +svint16_t svext_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m))) +svint32_t svextb_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m))) +svint64_t svextb_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m))) +svint16_t svextb_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x))) +svint32_t svextb_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x))) +svint64_t svextb_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x))) +svint16_t svextb_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z))) +svint32_t svextb_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z))) +svint64_t svextb_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z))) +svint16_t svextb_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m))) +svuint32_t svextb_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m))) +svuint64_t svextb_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m))) +svuint16_t svextb_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x))) +svuint32_t svextb_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x))) +svuint64_t svextb_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x))) +svuint16_t svextb_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z))) +svuint32_t svextb_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z))) +svuint64_t svextb_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z))) 
+svuint16_t svextb_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m))) +svint32_t svexth_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m))) +svint64_t svexth_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x))) +svint32_t svexth_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x))) +svint64_t svexth_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z))) +svint32_t svexth_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z))) +svint64_t svexth_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m))) +svuint32_t svexth_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m))) +svuint64_t svexth_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x))) +svuint32_t svexth_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x))) +svuint64_t svexth_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z))) +svuint32_t svexth_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z))) +svuint64_t svexth_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m))) +svint64_t svextw_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x))) +svint64_t svextw_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z))) +svint64_t svextw_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m))) +svuint64_t svextw_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x))) +svuint64_t svextw_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z))) +svuint64_t svextw_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8))) +svuint8_t svget2_u8(svuint8x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32))) +svuint32_t svget2_u32(svuint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64))) +svuint64_t svget2_u64(svuint64x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16))) +svuint16_t svget2_u16(svuint16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8))) +svint8_t svget2_s8(svint8x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64))) +svfloat64_t svget2_f64(svfloat64x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32))) +svfloat32_t svget2_f32(svfloat32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16))) +svfloat16_t svget2_f16(svfloat16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32))) +svint32_t svget2_s32(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64))) +svint64_t 
svget2_s64(svint64x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16))) +svint16_t svget2_s16(svint16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8))) +svuint8_t svget3_u8(svuint8x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32))) +svuint32_t svget3_u32(svuint32x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64))) +svuint64_t svget3_u64(svuint64x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16))) +svuint16_t svget3_u16(svuint16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8))) +svint8_t svget3_s8(svint8x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64))) +svfloat64_t svget3_f64(svfloat64x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32))) +svfloat32_t svget3_f32(svfloat32x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16))) +svfloat16_t svget3_f16(svfloat16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32))) +svint32_t svget3_s32(svint32x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64))) +svint64_t svget3_s64(svint64x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16))) +svint16_t svget3_s16(svint16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8))) +svuint8_t svget4_u8(svuint8x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32))) +svuint32_t svget4_u32(svuint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64))) +svuint64_t svget4_u64(svuint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16))) +svuint16_t svget4_u16(svuint16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8))) +svint8_t svget4_s8(svint8x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64))) +svfloat64_t svget4_f64(svfloat64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32))) +svfloat32_t svget4_f32(svfloat32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16))) +svfloat16_t svget4_f16(svfloat16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32))) +svint32_t svget4_s32(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64))) +svint64_t svget4_s64(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16))) +svint16_t svget4_s16(svint16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u8))) +svuint8_t svindex_u8(uint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u32))) +svuint32_t svindex_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u64))) +svuint64_t svindex_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u16))) +svuint16_t svindex_u16(uint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s8))) +svint8_t svindex_s8(int8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s32))) +svint32_t svindex_s32(int32_t, int32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s64))) +svint64_t svindex_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s16))) +svint16_t svindex_s16(int16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8))) +svuint8_t svinsr_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32))) +svuint32_t svinsr_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64))) +svuint64_t svinsr_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16))) +svuint16_t svinsr_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8))) +svint8_t svinsr_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64))) +svfloat64_t svinsr_n_f64(svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32))) +svfloat32_t svinsr_n_f32(svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16))) +svfloat16_t svinsr_n_f16(svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32))) +svint32_t svinsr_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64))) +svint64_t svinsr_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16))) +svint16_t svinsr_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8))) +uint8_t svlasta_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32))) +uint32_t svlasta_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64))) +uint64_t svlasta_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16))) +uint16_t svlasta_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8))) +int8_t svlasta_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64))) +float64_t svlasta_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32))) +float32_t svlasta_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16))) +float16_t svlasta_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32))) +int32_t svlasta_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64))) +int64_t svlasta_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16))) +int16_t svlasta_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8))) +uint8_t svlastb_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32))) +uint32_t svlastb_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64))) +uint64_t svlastb_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16))) +uint16_t svlastb_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8))) +int8_t svlastb_s8(svbool_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64))) +float64_t svlastb_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32))) +float32_t svlastb_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16))) +float16_t svlastb_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32))) +int32_t svlastb_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64))) +int64_t svlastb_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16))) +int16_t svlastb_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8))) +svuint8_t svld1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32))) +svuint32_t svld1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64))) +svuint64_t svld1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16))) +svuint16_t svld1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8))) +svint8_t svld1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64))) +svfloat64_t svld1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32))) +svfloat32_t svld1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16))) +svfloat16_t svld1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32))) +svint32_t svld1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64))) +svint64_t svld1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16))) +svint16_t svld1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8))) +svuint8_t svld1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32))) +svuint32_t svld1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64))) +svuint64_t svld1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16))) +svuint16_t svld1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8))) +svint8_t svld1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64))) +svfloat64_t svld1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32))) +svfloat32_t svld1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16))) +svfloat16_t svld1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32))) +svint32_t svld1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64))) +svint64_t svld1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16))) +svint16_t svld1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8))) +svuint8_t svld1rq_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32))) +svuint32_t svld1rq_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64))) +svuint64_t svld1rq_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16))) +svuint16_t svld1rq_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8))) +svint8_t svld1rq_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64))) +svfloat64_t svld1rq_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32))) +svfloat32_t svld1rq_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16))) +svfloat16_t svld1rq_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32))) +svint32_t svld1rq_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64))) +svint64_t svld1rq_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16))) +svint16_t svld1rq_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u32))) +svuint32_t svld1sb_vnum_u32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u64))) +svuint64_t svld1sb_vnum_u64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u16))) +svuint16_t svld1sb_vnum_u16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s32))) +svint32_t svld1sb_vnum_s32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s64))) +svint64_t svld1sb_vnum_s64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s16))) +svint16_t svld1sb_vnum_s16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u32))) +svuint32_t svld1sb_u32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u64))) +svuint64_t svld1sb_u64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u16))) +svuint16_t svld1sb_u16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s32))) +svint32_t svld1sb_s32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s64))) +svint64_t svld1sb_s64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s16))) +svint16_t svld1sb_s16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u32))) +svuint32_t svld1sh_vnum_u32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u64))) +svuint64_t svld1sh_vnum_u64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s32))) 
+svint32_t svld1sh_vnum_s32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s64))) +svint64_t svld1sh_vnum_s64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u32))) +svuint32_t svld1sh_u32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u64))) +svuint64_t svld1sh_u64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s32))) +svint32_t svld1sh_s32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s64))) +svint64_t svld1sh_s64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_u64))) +svuint64_t svld1sw_vnum_u64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_s64))) +svint64_t svld1sw_vnum_s64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_u64))) +svuint64_t svld1sw_u64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_s64))) +svint64_t svld1sw_s64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u32))) +svuint32_t svld1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u64))) +svuint64_t svld1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u16))) +svuint16_t svld1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s32))) +svint32_t svld1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s64))) +svint64_t svld1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s16))) +svint16_t svld1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u32))) +svuint32_t svld1ub_u32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u64))) +svuint64_t svld1ub_u64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u16))) +svuint16_t svld1ub_u16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s32))) +svint32_t svld1ub_s32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s64))) +svint64_t svld1ub_s64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s16))) +svint16_t svld1ub_s16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u32))) +svuint32_t svld1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u64))) +svuint64_t svld1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s32))) +svint32_t svld1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s64))) +svint64_t svld1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u32))) +svuint32_t svld1uh_u32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u64))) +svuint64_t svld1uh_u64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s32))) +svint32_t svld1uh_s32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s64))) +svint64_t svld1uh_s64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_u64))) +svuint64_t svld1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_s64))) +svint64_t svld1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_u64))) +svuint64_t svld1uw_u64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_s64))) +svint64_t svld1uw_s64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8))) +svuint8x2_t svld2_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32))) +svuint32x2_t svld2_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64))) +svuint64x2_t svld2_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16))) +svuint16x2_t svld2_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8))) +svint8x2_t svld2_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64))) +svfloat64x2_t svld2_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32))) +svfloat32x2_t svld2_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16))) +svfloat16x2_t svld2_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32))) +svint32x2_t svld2_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64))) +svint64x2_t svld2_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16))) +svint16x2_t svld2_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8))) +svuint8x2_t svld2_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32))) +svuint32x2_t svld2_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64))) +svuint64x2_t svld2_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16))) +svuint16x2_t svld2_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8))) +svint8x2_t svld2_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64))) +svfloat64x2_t svld2_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32))) +svfloat32x2_t svld2_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16))) 
+svfloat16x2_t svld2_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32))) +svint32x2_t svld2_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64))) +svint64x2_t svld2_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16))) +svint16x2_t svld2_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8))) +svuint8x3_t svld3_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32))) +svuint32x3_t svld3_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64))) +svuint64x3_t svld3_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16))) +svuint16x3_t svld3_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8))) +svint8x3_t svld3_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64))) +svfloat64x3_t svld3_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32))) +svfloat32x3_t svld3_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16))) +svfloat16x3_t svld3_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32))) +svint32x3_t svld3_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64))) +svint64x3_t svld3_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16))) +svint16x3_t svld3_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8))) +svuint8x3_t svld3_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32))) +svuint32x3_t svld3_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64))) +svuint64x3_t svld3_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16))) +svuint16x3_t svld3_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8))) +svint8x3_t svld3_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64))) +svfloat64x3_t svld3_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32))) +svfloat32x3_t svld3_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16))) +svfloat16x3_t svld3_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32))) +svint32x3_t svld3_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64))) +svint64x3_t svld3_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16))) +svint16x3_t svld3_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8))) +svuint8x4_t svld4_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32))) +svuint32x4_t svld4_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64))) +svuint64x4_t svld4_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16))) +svuint16x4_t svld4_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8))) +svint8x4_t svld4_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64))) +svfloat64x4_t svld4_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32))) +svfloat32x4_t svld4_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16))) +svfloat16x4_t svld4_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32))) +svint32x4_t svld4_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64))) +svint64x4_t svld4_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16))) +svint16x4_t svld4_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8))) +svuint8x4_t svld4_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32))) +svuint32x4_t svld4_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64))) +svuint64x4_t svld4_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16))) +svuint16x4_t svld4_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8))) +svint8x4_t svld4_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64))) +svfloat64x4_t svld4_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32))) +svfloat32x4_t svld4_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16))) +svfloat16x4_t svld4_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32))) +svint32x4_t svld4_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64))) +svint64x4_t svld4_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16))) +svint16x4_t svld4_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8))) +svuint8_t svldnt1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32))) +svuint32_t svldnt1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64))) +svuint64_t svldnt1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16))) +svuint16_t svldnt1_u16(svbool_t, uint16_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8))) +svint8_t svldnt1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64))) +svfloat64_t svldnt1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32))) +svfloat32_t svldnt1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16))) +svfloat16_t svldnt1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32))) +svint32_t svldnt1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64))) +svint64_t svldnt1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16))) +svint16_t svldnt1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8))) +svuint8_t svldnt1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32))) +svuint32_t svldnt1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64))) +svuint64_t svldnt1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16))) +svuint16_t svldnt1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8))) +svint8_t svldnt1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64))) +svfloat64_t svldnt1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32))) +svfloat32_t svldnt1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16))) +svfloat16_t svldnt1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32))) +svint32_t svldnt1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64))) +svint64_t svldnt1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16))) +svint16_t svldnt1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8))) +uint64_t svlen_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32))) +uint64_t svlen_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64))) +uint64_t svlen_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16))) +uint64_t svlen_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8))) +uint64_t svlen_s8(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64))) +uint64_t svlen_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32))) +uint64_t svlen_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16))) +uint64_t svlen_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32))) +uint64_t svlen_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64))) +uint64_t 
svlen_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16))) +uint64_t svlen_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m))) +svuint8_t svlsl_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m))) +svuint32_t svlsl_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m))) +svuint64_t svlsl_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m))) +svuint16_t svlsl_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m))) +svint8_t svlsl_n_s8_m(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m))) +svint32_t svlsl_n_s32_m(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m))) +svint64_t svlsl_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m))) +svint16_t svlsl_n_s16_m(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x))) +svuint8_t svlsl_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x))) +svuint32_t svlsl_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x))) +svuint64_t svlsl_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x))) +svuint16_t svlsl_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x))) +svint8_t svlsl_n_s8_x(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x))) +svint32_t svlsl_n_s32_x(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x))) +svint64_t svlsl_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x))) +svint16_t svlsl_n_s16_x(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z))) +svuint8_t svlsl_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z))) +svuint32_t svlsl_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z))) +svuint64_t svlsl_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z))) +svuint16_t svlsl_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z))) +svint8_t svlsl_n_s8_z(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z))) +svint32_t svlsl_n_s32_z(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z))) +svint64_t svlsl_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z))) +svint16_t svlsl_n_s16_z(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m))) +svuint8_t svlsl_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m))) +svuint32_t svlsl_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m))) +svuint64_t svlsl_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m))) +svuint16_t svlsl_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m))) +svint8_t svlsl_s8_m(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m))) +svint32_t svlsl_s32_m(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m))) +svint64_t svlsl_s64_m(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m))) +svint16_t svlsl_s16_m(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x))) +svuint8_t svlsl_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x))) +svuint32_t svlsl_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x))) +svuint64_t svlsl_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x))) +svuint16_t svlsl_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x))) +svint8_t svlsl_s8_x(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x))) +svint32_t svlsl_s32_x(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x))) +svint64_t svlsl_s64_x(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x))) +svint16_t svlsl_s16_x(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z))) +svuint8_t svlsl_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z))) +svuint32_t svlsl_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z))) +svuint64_t svlsl_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z))) +svuint16_t svlsl_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z))) +svint8_t svlsl_s8_z(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z))) +svint32_t svlsl_s32_z(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z))) +svint64_t svlsl_s64_z(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z))) +svint16_t svlsl_s16_z(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m))) +svuint8_t svlsl_wide_n_u8_m(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m))) +svuint32_t svlsl_wide_n_u32_m(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m))) +svuint16_t svlsl_wide_n_u16_m(svbool_t, svuint16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m))) +svint8_t svlsl_wide_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m))) +svint32_t svlsl_wide_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m))) +svint16_t svlsl_wide_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x))) +svuint8_t svlsl_wide_n_u8_x(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x))) +svuint32_t svlsl_wide_n_u32_x(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x))) +svuint16_t svlsl_wide_n_u16_x(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x))) +svint8_t svlsl_wide_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x))) +svint32_t svlsl_wide_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x))) +svint16_t svlsl_wide_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z))) +svuint8_t svlsl_wide_n_u8_z(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z))) +svuint32_t svlsl_wide_n_u32_z(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z))) +svuint16_t svlsl_wide_n_u16_z(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z))) +svint8_t svlsl_wide_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z))) +svint32_t svlsl_wide_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z))) +svint16_t svlsl_wide_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m))) +svuint8_t svlsl_wide_u8_m(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m))) +svuint32_t svlsl_wide_u32_m(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m))) +svuint16_t svlsl_wide_u16_m(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m))) +svint8_t svlsl_wide_s8_m(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m))) +svint32_t svlsl_wide_s32_m(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m))) +svint16_t svlsl_wide_s16_m(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x))) +svuint8_t svlsl_wide_u8_x(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x))) +svuint32_t svlsl_wide_u32_x(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x))) +svuint16_t svlsl_wide_u16_x(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x))) +svint8_t 
svlsl_wide_s8_x(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x))) +svint32_t svlsl_wide_s32_x(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x))) +svint16_t svlsl_wide_s16_x(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z))) +svuint8_t svlsl_wide_u8_z(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z))) +svuint32_t svlsl_wide_u32_z(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z))) +svuint16_t svlsl_wide_u16_z(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z))) +svint8_t svlsl_wide_s8_z(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z))) +svint32_t svlsl_wide_s32_z(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z))) +svint16_t svlsl_wide_s16_z(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m))) +svuint8_t svlsr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m))) +svuint32_t svlsr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m))) +svuint64_t svlsr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m))) +svuint16_t svlsr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x))) +svuint8_t svlsr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x))) +svuint32_t svlsr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x))) +svuint64_t svlsr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x))) +svuint16_t svlsr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z))) +svuint8_t svlsr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z))) +svuint32_t svlsr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z))) +svuint64_t svlsr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z))) +svuint16_t svlsr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m))) +svuint8_t svlsr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m))) +svuint32_t svlsr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m))) +svuint64_t svlsr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m))) +svuint16_t svlsr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x))) +svuint8_t svlsr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x))) +svuint32_t svlsr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x))) +svuint64_t svlsr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x))) +svuint16_t svlsr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z))) +svuint8_t svlsr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z))) +svuint32_t svlsr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z))) +svuint64_t svlsr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z))) +svuint16_t svlsr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m))) +svuint8_t svlsr_wide_n_u8_m(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m))) +svuint32_t svlsr_wide_n_u32_m(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m))) +svuint16_t svlsr_wide_n_u16_m(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x))) +svuint8_t svlsr_wide_n_u8_x(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x))) +svuint32_t svlsr_wide_n_u32_x(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x))) +svuint16_t svlsr_wide_n_u16_x(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z))) +svuint8_t svlsr_wide_n_u8_z(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z))) +svuint32_t svlsr_wide_n_u32_z(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z))) +svuint16_t svlsr_wide_n_u16_z(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m))) +svuint8_t svlsr_wide_u8_m(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m))) +svuint32_t svlsr_wide_u32_m(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m))) +svuint16_t svlsr_wide_u16_m(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x))) +svuint8_t svlsr_wide_u8_x(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x))) +svuint32_t svlsr_wide_u32_x(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x))) +svuint16_t svlsr_wide_u16_x(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z))) +svuint8_t svlsr_wide_u8_z(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z))) +svuint32_t svlsr_wide_u32_z(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z))) +svuint16_t svlsr_wide_u16_z(svbool_t, svuint16_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m))) +svfloat64_t svmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m))) +svfloat32_t svmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m))) +svfloat16_t svmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x))) +svfloat64_t svmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x))) +svfloat32_t svmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x))) +svfloat16_t svmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z))) +svfloat64_t svmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z))) +svfloat32_t svmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z))) +svfloat16_t svmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m))) +svuint8_t svmad_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m))) +svuint32_t svmad_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m))) +svuint64_t svmad_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m))) +svuint16_t svmad_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m))) +svint8_t svmad_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m))) +svint32_t svmad_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m))) +svint64_t svmad_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m))) +svint16_t svmad_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x))) +svuint8_t svmad_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x))) +svuint32_t svmad_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x))) +svuint64_t svmad_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x))) +svuint16_t svmad_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x))) +svint8_t svmad_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x))) +svint32_t svmad_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x))) +svint64_t svmad_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x))) +svint16_t svmad_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z))) +svuint8_t svmad_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z))) +svuint32_t svmad_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z))) +svuint64_t svmad_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z))) +svuint16_t svmad_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z))) +svint8_t svmad_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z))) +svint32_t svmad_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z))) +svint64_t svmad_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z))) +svint16_t svmad_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m))) +svfloat64_t svmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m))) +svfloat32_t svmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m))) +svfloat16_t svmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x))) +svfloat64_t svmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x))) +svfloat32_t svmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x))) +svfloat16_t svmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z))) +svfloat64_t svmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z))) +svfloat32_t svmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z))) +svfloat16_t svmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m))) +svuint8_t svmad_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m))) +svuint32_t svmad_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m))) +svuint64_t svmad_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m))) +svuint16_t svmad_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m))) +svint8_t svmad_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m))) +svint32_t svmad_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m))) +svint64_t svmad_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m))) +svint16_t svmad_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x))) +svuint8_t svmad_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x))) +svuint32_t svmad_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x))) +svuint64_t svmad_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x))) +svuint16_t svmad_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x))) +svint8_t svmad_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x))) +svint32_t svmad_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x))) +svint64_t svmad_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x))) +svint16_t svmad_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z))) +svuint8_t svmad_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z))) +svuint32_t svmad_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z))) +svuint64_t svmad_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z))) +svuint16_t svmad_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z))) +svint8_t svmad_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z))) +svint32_t svmad_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z))) +svint64_t svmad_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z))) +svint16_t svmad_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m))) +svfloat64_t svmax_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m))) +svfloat32_t svmax_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m))) +svfloat16_t svmax_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x))) +svfloat64_t svmax_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x))) +svfloat32_t svmax_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x))) +svfloat16_t svmax_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z))) +svfloat64_t svmax_n_f64_z(svbool_t, svfloat64_t, 
float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z))) +svfloat32_t svmax_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z))) +svfloat16_t svmax_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m))) +svint8_t svmax_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m))) +svint32_t svmax_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m))) +svint64_t svmax_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m))) +svint16_t svmax_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x))) +svint8_t svmax_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x))) +svint32_t svmax_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x))) +svint64_t svmax_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x))) +svint16_t svmax_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z))) +svint8_t svmax_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z))) +svint32_t svmax_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z))) +svint64_t svmax_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z))) +svint16_t svmax_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m))) +svuint8_t svmax_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m))) +svuint32_t svmax_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m))) +svuint64_t svmax_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m))) +svuint16_t svmax_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x))) +svuint8_t svmax_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x))) +svuint32_t svmax_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x))) +svuint64_t svmax_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x))) +svuint16_t svmax_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z))) +svuint8_t svmax_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z))) +svuint32_t svmax_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z))) +svuint64_t svmax_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z))) +svuint16_t svmax_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m))) +svfloat64_t svmax_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m))) +svfloat32_t svmax_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m))) +svfloat16_t svmax_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x))) +svfloat64_t svmax_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x))) +svfloat32_t svmax_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x))) +svfloat16_t svmax_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z))) +svfloat64_t svmax_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z))) +svfloat32_t svmax_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z))) +svfloat16_t svmax_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m))) +svint8_t svmax_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m))) +svint32_t svmax_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m))) +svint64_t svmax_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m))) +svint16_t svmax_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x))) +svint8_t svmax_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x))) +svint32_t svmax_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x))) +svint64_t svmax_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x))) +svint16_t svmax_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z))) +svint8_t svmax_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z))) +svint32_t svmax_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z))) +svint64_t svmax_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z))) +svint16_t svmax_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m))) +svuint8_t svmax_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m))) +svuint32_t svmax_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m))) +svuint64_t svmax_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m))) +svuint16_t svmax_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x))) +svuint8_t svmax_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x))) 
+svuint32_t svmax_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x))) +svuint64_t svmax_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x))) +svuint16_t svmax_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z))) +svuint8_t svmax_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z))) +svuint32_t svmax_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z))) +svuint64_t svmax_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z))) +svuint16_t svmax_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m))) +svfloat64_t svmaxnm_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m))) +svfloat32_t svmaxnm_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m))) +svfloat16_t svmaxnm_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x))) +svfloat64_t svmaxnm_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x))) +svfloat32_t svmaxnm_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x))) +svfloat16_t svmaxnm_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z))) +svfloat64_t svmaxnm_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z))) +svfloat32_t svmaxnm_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z))) +svfloat16_t svmaxnm_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m))) +svfloat64_t svmaxnm_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m))) +svfloat32_t svmaxnm_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m))) +svfloat16_t svmaxnm_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x))) +svfloat64_t svmaxnm_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x))) +svfloat32_t svmaxnm_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x))) +svfloat16_t svmaxnm_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z))) +svfloat64_t svmaxnm_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z))) +svfloat32_t svmaxnm_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z))) +svfloat16_t svmaxnm_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64))) +float64_t svmaxnmv_f64(svbool_t, 
svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32))) +float32_t svmaxnmv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16))) +float16_t svmaxnmv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64))) +float64_t svmaxv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32))) +float32_t svmaxv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16))) +float16_t svmaxv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8))) +int8_t svmaxv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32))) +int32_t svmaxv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64))) +int64_t svmaxv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16))) +int16_t svmaxv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8))) +uint8_t svmaxv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32))) +uint32_t svmaxv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64))) +uint64_t svmaxv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16))) +uint16_t svmaxv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m))) +svfloat64_t svmin_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m))) +svfloat32_t svmin_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m))) +svfloat16_t svmin_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x))) +svfloat64_t svmin_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x))) +svfloat32_t svmin_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x))) +svfloat16_t svmin_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z))) +svfloat64_t svmin_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z))) +svfloat32_t svmin_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z))) +svfloat16_t svmin_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m))) +svint8_t svmin_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m))) +svint32_t svmin_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m))) +svint64_t svmin_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m))) +svint16_t svmin_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x))) +svint8_t svmin_n_s8_x(svbool_t, svint8_t, int8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x))) +svint32_t svmin_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x))) +svint64_t svmin_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x))) +svint16_t svmin_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z))) +svint8_t svmin_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z))) +svint32_t svmin_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z))) +svint64_t svmin_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z))) +svint16_t svmin_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m))) +svuint8_t svmin_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m))) +svuint32_t svmin_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m))) +svuint64_t svmin_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m))) +svuint16_t svmin_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x))) +svuint8_t svmin_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x))) +svuint32_t svmin_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x))) +svuint64_t svmin_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x))) +svuint16_t svmin_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z))) +svuint8_t svmin_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z))) +svuint32_t svmin_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z))) +svuint64_t svmin_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z))) +svuint16_t svmin_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m))) +svfloat64_t svmin_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m))) +svfloat32_t svmin_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m))) +svfloat16_t svmin_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x))) +svfloat64_t svmin_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x))) +svfloat32_t svmin_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x))) +svfloat16_t svmin_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z))) +svfloat64_t svmin_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z))) +svfloat32_t svmin_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z))) +svfloat16_t svmin_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m))) +svint8_t svmin_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m))) +svint32_t svmin_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m))) +svint64_t svmin_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m))) +svint16_t svmin_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x))) +svint8_t svmin_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x))) +svint32_t svmin_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x))) +svint64_t svmin_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x))) +svint16_t svmin_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z))) +svint8_t svmin_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z))) +svint32_t svmin_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z))) +svint64_t svmin_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z))) +svint16_t svmin_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m))) +svuint8_t svmin_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m))) +svuint32_t svmin_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m))) +svuint64_t svmin_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m))) +svuint16_t svmin_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x))) +svuint8_t svmin_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x))) +svuint32_t svmin_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x))) +svuint64_t svmin_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x))) +svuint16_t svmin_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z))) +svuint8_t svmin_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z))) +svuint32_t svmin_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z))) +svuint64_t svmin_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z))) +svuint16_t svmin_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m))) +svfloat64_t 
svminnm_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m))) +svfloat32_t svminnm_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m))) +svfloat16_t svminnm_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x))) +svfloat64_t svminnm_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x))) +svfloat32_t svminnm_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x))) +svfloat16_t svminnm_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z))) +svfloat64_t svminnm_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z))) +svfloat32_t svminnm_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z))) +svfloat16_t svminnm_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m))) +svfloat64_t svminnm_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m))) +svfloat32_t svminnm_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m))) +svfloat16_t svminnm_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x))) +svfloat64_t svminnm_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x))) +svfloat32_t svminnm_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x))) +svfloat16_t svminnm_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z))) +svfloat64_t svminnm_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z))) +svfloat32_t svminnm_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z))) +svfloat16_t svminnm_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64))) +float64_t svminnmv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32))) +float32_t svminnmv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16))) +float16_t svminnmv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64))) +float64_t svminv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32))) +float32_t svminv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16))) +float16_t svminv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8))) +int8_t svminv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32))) +int32_t svminv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64))) +int64_t svminv_s64(svbool_t, 
svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16))) +int16_t svminv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8))) +uint8_t svminv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32))) +uint32_t svminv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64))) +uint64_t svminv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16))) +uint16_t svminv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m))) +svfloat64_t svmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m))) +svfloat32_t svmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m))) +svfloat16_t svmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x))) +svfloat64_t svmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x))) +svfloat32_t svmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x))) +svfloat16_t svmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z))) +svfloat64_t svmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z))) +svfloat32_t svmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z))) +svfloat16_t svmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m))) +svuint8_t svmla_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m))) +svuint32_t svmla_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m))) +svuint64_t svmla_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m))) +svuint16_t svmla_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m))) +svint8_t svmla_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m))) +svint32_t svmla_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m))) +svint64_t svmla_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m))) +svint16_t svmla_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x))) +svuint8_t svmla_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x))) +svuint32_t svmla_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x))) +svuint64_t svmla_n_u64_x(svbool_t, 
svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x))) +svuint16_t svmla_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x))) +svint8_t svmla_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x))) +svint32_t svmla_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x))) +svint64_t svmla_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x))) +svint16_t svmla_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z))) +svuint8_t svmla_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z))) +svuint32_t svmla_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z))) +svuint64_t svmla_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z))) +svuint16_t svmla_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z))) +svint8_t svmla_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z))) +svint32_t svmla_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z))) +svint64_t svmla_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z))) +svint16_t svmla_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m))) +svfloat64_t svmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m))) +svfloat32_t svmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m))) +svfloat16_t svmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x))) +svfloat64_t svmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x))) +svfloat32_t svmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x))) +svfloat16_t svmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z))) +svfloat64_t svmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z))) +svfloat32_t svmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z))) +svfloat16_t svmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m))) +svuint8_t svmla_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m))) +svuint32_t svmla_u32_m(svbool_t, svuint32_t, 
svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m))) +svuint64_t svmla_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m))) +svuint16_t svmla_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m))) +svint8_t svmla_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m))) +svint32_t svmla_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m))) +svint64_t svmla_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m))) +svint16_t svmla_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x))) +svuint8_t svmla_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x))) +svuint32_t svmla_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x))) +svuint64_t svmla_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x))) +svuint16_t svmla_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x))) +svint8_t svmla_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x))) +svint32_t svmla_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x))) +svint64_t svmla_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x))) +svint16_t svmla_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z))) +svuint8_t svmla_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z))) +svuint32_t svmla_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z))) +svuint64_t svmla_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z))) +svuint16_t svmla_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z))) +svint8_t svmla_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z))) +svint32_t svmla_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z))) +svint64_t svmla_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z))) +svint16_t svmla_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64))) +svfloat64_t svmla_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32))) +svfloat32_t svmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16))) +svfloat16_t svmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m))) +svfloat64_t svmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m))) +svfloat32_t svmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m))) +svfloat16_t svmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x))) +svfloat64_t svmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x))) +svfloat32_t svmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x))) +svfloat16_t svmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z))) +svfloat64_t svmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z))) +svfloat32_t svmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z))) +svfloat16_t svmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m))) +svuint8_t svmls_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m))) +svuint32_t svmls_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m))) +svuint64_t svmls_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m))) +svuint16_t svmls_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m))) +svint8_t svmls_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m))) +svint32_t svmls_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m))) +svint64_t svmls_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m))) +svint16_t svmls_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x))) +svuint8_t svmls_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x))) +svuint32_t svmls_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x))) +svuint64_t svmls_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x))) +svuint16_t svmls_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x))) +svint8_t svmls_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x))) +svint32_t svmls_n_s32_x(svbool_t, svint32_t, 
svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x))) +svint64_t svmls_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x))) +svint16_t svmls_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z))) +svuint8_t svmls_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z))) +svuint32_t svmls_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z))) +svuint64_t svmls_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z))) +svuint16_t svmls_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z))) +svint8_t svmls_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z))) +svint32_t svmls_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z))) +svint64_t svmls_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z))) +svint16_t svmls_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m))) +svfloat64_t svmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m))) +svfloat32_t svmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m))) +svfloat16_t svmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x))) +svfloat64_t svmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x))) +svfloat32_t svmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x))) +svfloat16_t svmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z))) +svfloat64_t svmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z))) +svfloat32_t svmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z))) +svfloat16_t svmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m))) +svuint8_t svmls_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m))) +svuint32_t svmls_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m))) +svuint64_t svmls_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m))) +svuint16_t svmls_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m))) +svint8_t svmls_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m))) +svint32_t svmls_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m))) +svint64_t svmls_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m))) +svint16_t svmls_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x))) +svuint8_t svmls_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x))) +svuint32_t svmls_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x))) +svuint64_t svmls_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x))) +svuint16_t svmls_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x))) +svint8_t svmls_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x))) +svint32_t svmls_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x))) +svint64_t svmls_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x))) +svint16_t svmls_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z))) +svuint8_t svmls_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z))) +svuint32_t svmls_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z))) +svuint64_t svmls_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z))) +svuint16_t svmls_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z))) +svint8_t svmls_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z))) +svint32_t svmls_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z))) +svint64_t svmls_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z))) +svint16_t svmls_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64))) +svfloat64_t svmls_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32))) +svfloat32_t svmls_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16))) +svfloat16_t svmls_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z))) +svbool_t svmov_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m))) +svfloat64_t svmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m))) +svfloat32_t 
svmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m))) +svfloat16_t svmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x))) +svfloat64_t svmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x))) +svfloat32_t svmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x))) +svfloat16_t svmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z))) +svfloat64_t svmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z))) +svfloat32_t svmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z))) +svfloat16_t svmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m))) +svuint8_t svmsb_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m))) +svuint32_t svmsb_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m))) +svuint64_t svmsb_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m))) +svuint16_t svmsb_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m))) +svint8_t svmsb_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m))) +svint32_t svmsb_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m))) +svint64_t svmsb_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m))) +svint16_t svmsb_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x))) +svuint8_t svmsb_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x))) +svuint32_t svmsb_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x))) +svuint64_t svmsb_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x))) +svuint16_t svmsb_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x))) +svint8_t svmsb_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x))) +svint32_t svmsb_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x))) +svint64_t svmsb_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x))) +svint16_t svmsb_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z))) +svuint8_t 
svmsb_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z))) +svuint32_t svmsb_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z))) +svuint64_t svmsb_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z))) +svuint16_t svmsb_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z))) +svint8_t svmsb_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z))) +svint32_t svmsb_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z))) +svint64_t svmsb_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z))) +svint16_t svmsb_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m))) +svfloat64_t svmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m))) +svfloat32_t svmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m))) +svfloat16_t svmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x))) +svfloat64_t svmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x))) +svfloat32_t svmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x))) +svfloat16_t svmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z))) +svfloat64_t svmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z))) +svfloat32_t svmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z))) +svfloat16_t svmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m))) +svuint8_t svmsb_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m))) +svuint32_t svmsb_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m))) +svuint64_t svmsb_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m))) +svuint16_t svmsb_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m))) +svint8_t svmsb_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m))) +svint32_t svmsb_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m))) +svint64_t svmsb_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m))) +svint16_t svmsb_s16_m(svbool_t, 
svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x))) +svuint8_t svmsb_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x))) +svuint32_t svmsb_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x))) +svuint64_t svmsb_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x))) +svuint16_t svmsb_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x))) +svint8_t svmsb_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x))) +svint32_t svmsb_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x))) +svint64_t svmsb_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x))) +svint16_t svmsb_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z))) +svuint8_t svmsb_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z))) +svuint32_t svmsb_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z))) +svuint64_t svmsb_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z))) +svuint16_t svmsb_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z))) +svint8_t svmsb_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z))) +svint32_t svmsb_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z))) +svint64_t svmsb_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z))) +svint16_t svmsb_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m))) +svfloat64_t svmul_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m))) +svfloat32_t svmul_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m))) +svfloat16_t svmul_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x))) +svfloat64_t svmul_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x))) +svfloat32_t svmul_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x))) +svfloat16_t svmul_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z))) +svfloat64_t svmul_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z))) +svfloat32_t svmul_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z))) +svfloat16_t 
svmul_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m))) +svuint8_t svmul_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m))) +svuint32_t svmul_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m))) +svuint64_t svmul_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m))) +svuint16_t svmul_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m))) +svint8_t svmul_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m))) +svint32_t svmul_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m))) +svint64_t svmul_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m))) +svint16_t svmul_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x))) +svuint8_t svmul_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x))) +svuint32_t svmul_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x))) +svuint64_t svmul_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x))) +svuint16_t svmul_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x))) +svint8_t svmul_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x))) +svint32_t svmul_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x))) +svint64_t svmul_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x))) +svint16_t svmul_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z))) +svuint8_t svmul_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z))) +svuint32_t svmul_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z))) +svuint64_t svmul_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z))) +svuint16_t svmul_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z))) +svint8_t svmul_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z))) +svint32_t svmul_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z))) +svint64_t svmul_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z))) +svint16_t svmul_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m))) +svfloat64_t svmul_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m))) +svfloat32_t svmul_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m))) +svfloat16_t svmul_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x))) +svfloat64_t svmul_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x))) +svfloat32_t svmul_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x))) +svfloat16_t svmul_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z))) +svfloat64_t svmul_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z))) +svfloat32_t svmul_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z))) +svfloat16_t svmul_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m))) +svuint8_t svmul_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m))) +svuint32_t svmul_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m))) +svuint64_t svmul_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m))) +svuint16_t svmul_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m))) +svint8_t svmul_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m))) +svint32_t svmul_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m))) +svint64_t svmul_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m))) +svint16_t svmul_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x))) +svuint8_t svmul_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x))) +svuint32_t svmul_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x))) +svuint64_t svmul_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x))) +svuint16_t svmul_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x))) +svint8_t svmul_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x))) +svint32_t svmul_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x))) +svint64_t svmul_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x))) +svint16_t svmul_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z))) +svuint8_t svmul_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z))) +svuint32_t svmul_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z))) +svuint64_t svmul_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z))) 
+svuint16_t svmul_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z))) +svint8_t svmul_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z))) +svint32_t svmul_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z))) +svint64_t svmul_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z))) +svint16_t svmul_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64))) +svfloat64_t svmul_lane_f64(svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32))) +svfloat32_t svmul_lane_f32(svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16))) +svfloat16_t svmul_lane_f16(svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m))) +svint8_t svmulh_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m))) +svint32_t svmulh_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m))) +svint64_t svmulh_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m))) +svint16_t svmulh_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x))) +svint8_t svmulh_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x))) +svint32_t svmulh_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x))) +svint64_t svmulh_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x))) +svint16_t svmulh_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z))) +svint8_t svmulh_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z))) +svint32_t svmulh_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z))) +svint64_t svmulh_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z))) +svint16_t svmulh_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m))) +svuint8_t svmulh_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m))) +svuint32_t svmulh_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m))) +svuint64_t svmulh_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m))) +svuint16_t svmulh_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x))) +svuint8_t svmulh_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x))) +svuint32_t svmulh_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x))) +svuint64_t 
svmulh_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x))) +svuint16_t svmulh_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z))) +svuint8_t svmulh_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z))) +svuint32_t svmulh_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z))) +svuint64_t svmulh_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z))) +svuint16_t svmulh_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m))) +svint8_t svmulh_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m))) +svint32_t svmulh_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m))) +svint64_t svmulh_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m))) +svint16_t svmulh_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x))) +svint8_t svmulh_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x))) +svint32_t svmulh_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x))) +svint64_t svmulh_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x))) +svint16_t svmulh_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z))) +svint8_t svmulh_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z))) +svint32_t svmulh_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z))) +svint64_t svmulh_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z))) +svint16_t svmulh_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m))) +svuint8_t svmulh_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m))) +svuint32_t svmulh_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m))) +svuint64_t svmulh_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m))) +svuint16_t svmulh_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x))) +svuint8_t svmulh_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x))) +svuint32_t svmulh_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x))) +svuint64_t svmulh_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x))) +svuint16_t svmulh_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z))) +svuint8_t svmulh_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z))) +svuint32_t svmulh_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z))) +svuint64_t svmulh_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z))) +svuint16_t svmulh_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m))) +svfloat64_t svmulx_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m))) +svfloat32_t svmulx_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m))) +svfloat16_t svmulx_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x))) +svfloat64_t svmulx_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x))) +svfloat32_t svmulx_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x))) +svfloat16_t svmulx_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z))) +svfloat64_t svmulx_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z))) +svfloat32_t svmulx_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z))) +svfloat16_t svmulx_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m))) +svfloat64_t svmulx_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m))) +svfloat32_t svmulx_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m))) +svfloat16_t svmulx_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x))) +svfloat64_t svmulx_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x))) +svfloat32_t svmulx_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x))) +svfloat16_t svmulx_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z))) +svfloat64_t svmulx_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z))) +svfloat32_t svmulx_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z))) +svfloat16_t svmulx_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z))) +svbool_t svnand_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m))) +svfloat64_t svneg_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m))) +svfloat32_t svneg_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m))) +svfloat16_t svneg_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x))) +svfloat64_t svneg_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x))) +svfloat32_t svneg_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x))) +svfloat16_t svneg_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z))) +svfloat64_t svneg_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z))) +svfloat32_t svneg_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z))) +svfloat16_t svneg_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m))) +svint8_t svneg_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m))) +svint32_t svneg_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m))) +svint64_t svneg_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m))) +svint16_t svneg_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x))) +svint8_t svneg_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x))) +svint32_t svneg_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x))) +svint64_t svneg_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x))) +svint16_t svneg_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z))) +svint8_t svneg_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z))) +svint32_t svneg_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z))) +svint64_t svneg_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z))) +svint16_t svneg_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m))) +svfloat64_t svnmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m))) +svfloat32_t svnmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m))) +svfloat16_t svnmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x))) +svfloat64_t svnmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x))) +svfloat32_t svnmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x))) +svfloat16_t svnmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z))) +svfloat64_t svnmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z))) +svfloat32_t svnmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z))) 
+svfloat16_t svnmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m))) +svfloat64_t svnmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m))) +svfloat32_t svnmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m))) +svfloat16_t svnmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x))) +svfloat64_t svnmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x))) +svfloat32_t svnmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x))) +svfloat16_t svnmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z))) +svfloat64_t svnmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z))) +svfloat32_t svnmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z))) +svfloat16_t svnmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m))) +svfloat64_t svnmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m))) +svfloat32_t svnmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m))) +svfloat16_t svnmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x))) +svfloat64_t svnmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x))) +svfloat32_t svnmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x))) +svfloat16_t svnmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z))) +svfloat64_t svnmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z))) +svfloat32_t svnmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z))) +svfloat16_t svnmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m))) +svfloat64_t svnmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m))) +svfloat32_t svnmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m))) +svfloat16_t svnmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x))) +svfloat64_t svnmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x))) +svfloat32_t 
svnmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x))) +svfloat16_t svnmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z))) +svfloat64_t svnmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z))) +svfloat32_t svnmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z))) +svfloat16_t svnmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m))) +svfloat64_t svnmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m))) +svfloat32_t svnmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m))) +svfloat16_t svnmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x))) +svfloat64_t svnmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x))) +svfloat32_t svnmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x))) +svfloat16_t svnmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z))) +svfloat64_t svnmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z))) +svfloat32_t svnmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z))) +svfloat16_t svnmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m))) +svfloat64_t svnmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m))) +svfloat32_t svnmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m))) +svfloat16_t svnmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x))) +svfloat64_t svnmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x))) +svfloat32_t svnmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x))) +svfloat16_t svnmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z))) +svfloat64_t svnmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z))) +svfloat32_t svnmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z))) +svfloat16_t svnmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m))) +svfloat64_t 
svnmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m))) +svfloat32_t svnmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m))) +svfloat16_t svnmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x))) +svfloat64_t svnmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x))) +svfloat32_t svnmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x))) +svfloat16_t svnmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z))) +svfloat64_t svnmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z))) +svfloat32_t svnmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z))) +svfloat16_t svnmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m))) +svfloat64_t svnmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m))) +svfloat32_t svnmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m))) +svfloat16_t svnmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x))) +svfloat64_t svnmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x))) +svfloat32_t svnmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x))) +svfloat16_t svnmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z))) +svfloat64_t svnmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z))) +svfloat32_t svnmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z))) +svfloat16_t svnmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z))) +svbool_t svnor_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z))) +svbool_t svnot_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m))) +svuint8_t svnot_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m))) +svuint32_t svnot_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m))) +svuint64_t svnot_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m))) +svuint16_t svnot_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m))) +svint8_t 
svnot_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m))) +svint32_t svnot_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m))) +svint64_t svnot_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m))) +svint16_t svnot_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x))) +svuint8_t svnot_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x))) +svuint32_t svnot_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x))) +svuint64_t svnot_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x))) +svuint16_t svnot_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x))) +svint8_t svnot_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x))) +svint32_t svnot_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x))) +svint64_t svnot_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x))) +svint16_t svnot_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z))) +svuint8_t svnot_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z))) +svuint32_t svnot_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z))) +svuint64_t svnot_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z))) +svuint16_t svnot_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z))) +svint8_t svnot_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z))) +svint32_t svnot_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z))) +svint64_t svnot_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z))) +svint16_t svnot_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z))) +svbool_t svorn_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z))) +svbool_t svorr_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m))) +svuint8_t svorr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m))) +svuint32_t svorr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m))) +svuint64_t svorr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m))) +svuint16_t svorr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m))) +svint8_t svorr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m))) +svint32_t svorr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m))) +svint64_t svorr_n_s64_m(svbool_t, svint64_t, 
int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m))) +svint16_t svorr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x))) +svuint8_t svorr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x))) +svuint32_t svorr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x))) +svuint64_t svorr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x))) +svuint16_t svorr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x))) +svint8_t svorr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x))) +svint32_t svorr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x))) +svint64_t svorr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x))) +svint16_t svorr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z))) +svuint8_t svorr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z))) +svuint32_t svorr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z))) +svuint64_t svorr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z))) +svuint16_t svorr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z))) +svint8_t svorr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z))) +svint32_t svorr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z))) +svint64_t svorr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z))) +svint16_t svorr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m))) +svuint8_t svorr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m))) +svuint32_t svorr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m))) +svuint64_t svorr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m))) +svuint16_t svorr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m))) +svint8_t svorr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m))) +svint32_t svorr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m))) +svint64_t svorr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m))) +svint16_t svorr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x))) +svuint8_t svorr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x))) +svuint32_t svorr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x))) +svuint64_t svorr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x))) +svuint16_t svorr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x))) +svint8_t svorr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x))) +svint32_t svorr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x))) +svint64_t svorr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x))) +svint16_t svorr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z))) +svuint8_t svorr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z))) +svuint32_t svorr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z))) +svuint64_t svorr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z))) +svuint16_t svorr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z))) +svint8_t svorr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z))) +svint32_t svorr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z))) +svint64_t svorr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z))) +svint16_t svorr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8))) +uint8_t svorv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32))) +uint32_t svorv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64))) +uint64_t svorv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16))) +uint16_t svorv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8))) +int8_t svorv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32))) +int32_t svorv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64))) +int64_t svorv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16))) +int16_t svorv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b))) +svbool_t svpfalse_b(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b))) +svbool_t svpfirst_b(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b8))) +svbool_t svpnext_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b32))) +svbool_t svpnext_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b64))) +svbool_t svpnext_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b16))) 
+svbool_t svpnext_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb))) +void svprfb(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_vnum))) +void svprfb_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd))) +void svprfd(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_vnum))) +void svprfd_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh))) +void svprfh(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_vnum))) +void svprfh_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw))) +void svprfw(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_vnum))) +void svprfw_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_any))) +bool svptest_any(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_first))) +bool svptest_first(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_last))) +bool svptest_last(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b8))) +svbool_t svptrue_pat_b8(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b32))) +svbool_t svptrue_pat_b32(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b64))) +svbool_t svptrue_pat_b64(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b16))) +svbool_t svptrue_pat_b16(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b8))) +svbool_t svptrue_b8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b32))) +svbool_t svptrue_b32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b64))) +svbool_t svptrue_b64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b16))) +svbool_t svptrue_b16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8))) +svint8_t svqadd_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32))) +svint32_t svqadd_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64))) +svint64_t svqadd_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16))) +svint16_t svqadd_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8))) +svuint8_t svqadd_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32))) +svuint32_t svqadd_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64))) +svuint64_t svqadd_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16))) +svuint16_t svqadd_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8))) +svint8_t svqadd_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32))) +svint32_t 
svqadd_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64))) +svint64_t svqadd_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16))) +svint16_t svqadd_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8))) +svuint8_t svqadd_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32))) +svuint32_t svqadd_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64))) +svuint64_t svqadd_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16))) +svuint16_t svqadd_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32))) +int32_t svqdecb_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64))) +int64_t svqdecb_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32))) +uint32_t svqdecb_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64))) +uint64_t svqdecb_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32))) +int32_t svqdecb_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64))) +int64_t svqdecb_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32))) +uint32_t svqdecb_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64))) +uint64_t svqdecb_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32))) +int32_t svqdecd_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64))) +int64_t svqdecd_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32))) +uint32_t svqdecd_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64))) +uint64_t svqdecd_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64))) +svint64_t svqdecd_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64))) +svuint64_t svqdecd_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32))) +int32_t svqdecd_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64))) +int64_t svqdecd_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32))) +uint32_t svqdecd_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64))) +uint64_t svqdecd_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64))) +svint64_t svqdecd_pat_s64(svint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64))) +svuint64_t svqdecd_pat_u64(svuint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32))) +int32_t svqdech_n_s32(int32_t, 
uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64))) +int64_t svqdech_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32))) +uint32_t svqdech_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64))) +uint64_t svqdech_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16))) +svint16_t svqdech_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16))) +svuint16_t svqdech_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32))) +int32_t svqdech_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64))) +int64_t svqdech_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32))) +uint32_t svqdech_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64))) +uint64_t svqdech_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16))) +svint16_t svqdech_pat_s16(svint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16))) +svuint16_t svqdech_pat_u16(svuint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8))) +int32_t svqdecp_n_s32_b8(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32))) +int32_t svqdecp_n_s32_b32(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64))) +int32_t svqdecp_n_s32_b64(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16))) +int32_t svqdecp_n_s32_b16(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8))) +int64_t svqdecp_n_s64_b8(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32))) +int64_t svqdecp_n_s64_b32(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64))) +int64_t svqdecp_n_s64_b64(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16))) +int64_t svqdecp_n_s64_b16(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8))) +uint32_t svqdecp_n_u32_b8(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32))) +uint32_t svqdecp_n_u32_b32(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64))) +uint32_t svqdecp_n_u32_b64(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16))) +uint32_t svqdecp_n_u32_b16(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8))) +uint64_t svqdecp_n_u64_b8(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32))) +uint64_t svqdecp_n_u64_b32(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64))) +uint64_t svqdecp_n_u64_b64(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16))) +uint64_t 
svqdecp_n_u64_b16(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32))) +svint32_t svqdecp_s32(svint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64))) +svint64_t svqdecp_s64(svint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16))) +svint16_t svqdecp_s16(svint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32))) +svuint32_t svqdecp_u32(svuint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64))) +svuint64_t svqdecp_u64(svuint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16))) +svuint16_t svqdecp_u16(svuint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32))) +int32_t svqdecw_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64))) +int64_t svqdecw_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32))) +uint32_t svqdecw_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64))) +uint64_t svqdecw_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32))) +svint32_t svqdecw_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32))) +svuint32_t svqdecw_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32))) +int32_t svqdecw_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64))) +int64_t svqdecw_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32))) +uint32_t svqdecw_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64))) +uint64_t svqdecw_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32))) +svint32_t svqdecw_pat_s32(svint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32))) +svuint32_t svqdecw_pat_u32(svuint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32))) +int32_t svqincb_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64))) +int64_t svqincb_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32))) +uint32_t svqincb_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64))) +uint64_t svqincb_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32))) +int32_t svqincb_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64))) +int64_t svqincb_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32))) +uint32_t svqincb_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64))) +uint64_t svqincb_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32))) +int32_t 
svqincd_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64))) +int64_t svqincd_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32))) +uint32_t svqincd_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64))) +uint64_t svqincd_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64))) +svint64_t svqincd_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64))) +svuint64_t svqincd_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32))) +int32_t svqincd_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64))) +int64_t svqincd_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32))) +uint32_t svqincd_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64))) +uint64_t svqincd_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64))) +svint64_t svqincd_pat_s64(svint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64))) +svuint64_t svqincd_pat_u64(svuint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32))) +int32_t svqinch_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64))) +int64_t svqinch_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32))) +uint32_t svqinch_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64))) +uint64_t svqinch_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16))) +svint16_t svqinch_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16))) +svuint16_t svqinch_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32))) +int32_t svqinch_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64))) +int64_t svqinch_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32))) +uint32_t svqinch_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64))) +uint64_t svqinch_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16))) +svint16_t svqinch_pat_s16(svint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16))) +svuint16_t svqinch_pat_u16(svuint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8))) +int32_t svqincp_n_s32_b8(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32))) +int32_t svqincp_n_s32_b32(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64))) +int32_t svqincp_n_s32_b64(int32_t, svbool_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16))) +int32_t svqincp_n_s32_b16(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8))) +int64_t svqincp_n_s64_b8(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32))) +int64_t svqincp_n_s64_b32(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64))) +int64_t svqincp_n_s64_b64(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16))) +int64_t svqincp_n_s64_b16(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8))) +uint32_t svqincp_n_u32_b8(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32))) +uint32_t svqincp_n_u32_b32(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64))) +uint32_t svqincp_n_u32_b64(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16))) +uint32_t svqincp_n_u32_b16(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8))) +uint64_t svqincp_n_u64_b8(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32))) +uint64_t svqincp_n_u64_b32(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64))) +uint64_t svqincp_n_u64_b64(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16))) +uint64_t svqincp_n_u64_b16(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32))) +svint32_t svqincp_s32(svint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64))) +svint64_t svqincp_s64(svint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16))) +svint16_t svqincp_s16(svint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32))) +svuint32_t svqincp_u32(svuint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64))) +svuint64_t svqincp_u64(svuint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16))) +svuint16_t svqincp_u16(svuint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32))) +int32_t svqincw_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64))) +int64_t svqincw_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32))) +uint32_t svqincw_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64))) +uint64_t svqincw_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32))) +svint32_t svqincw_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32))) +svuint32_t svqincw_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32))) +int32_t svqincw_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64))) +int64_t svqincw_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32))) +uint32_t svqincw_pat_n_u32(uint32_t, 
enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64))) +uint64_t svqincw_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32))) +svint32_t svqincw_pat_s32(svint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32))) +svuint32_t svqincw_pat_u32(svuint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8))) +svint8_t svqsub_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32))) +svint32_t svqsub_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64))) +svint64_t svqsub_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16))) +svint16_t svqsub_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8))) +svuint8_t svqsub_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32))) +svuint32_t svqsub_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64))) +svuint64_t svqsub_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16))) +svuint16_t svqsub_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8))) +svint8_t svqsub_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32))) +svint32_t svqsub_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64))) +svint64_t svqsub_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16))) +svint16_t svqsub_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8))) +svuint8_t svqsub_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32))) +svuint32_t svqsub_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64))) +svuint64_t svqsub_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16))) +svuint16_t svqsub_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m))) +svuint8_t svrbit_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m))) +svuint32_t svrbit_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m))) +svuint64_t svrbit_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m))) +svuint16_t svrbit_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m))) +svint8_t svrbit_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m))) +svint32_t svrbit_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m))) +svint64_t svrbit_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m))) +svint16_t svrbit_s16_m(svint16_t, svbool_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x))) +svuint8_t svrbit_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x))) +svuint32_t svrbit_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x))) +svuint64_t svrbit_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x))) +svuint16_t svrbit_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x))) +svint8_t svrbit_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x))) +svint32_t svrbit_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x))) +svint64_t svrbit_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x))) +svint16_t svrbit_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z))) +svuint8_t svrbit_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z))) +svuint32_t svrbit_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z))) +svuint64_t svrbit_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z))) +svuint16_t svrbit_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z))) +svint8_t svrbit_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z))) +svint32_t svrbit_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z))) +svint64_t svrbit_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z))) +svint16_t svrbit_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64))) +svfloat64_t svrecpe_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32))) +svfloat32_t svrecpe_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16))) +svfloat16_t svrecpe_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64))) +svfloat64_t svrecps_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32))) +svfloat32_t svrecps_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16))) +svfloat16_t svrecps_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m))) +svfloat64_t svrecpx_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m))) +svfloat32_t svrecpx_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m))) +svfloat16_t svrecpx_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x))) +svfloat64_t svrecpx_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x))) +svfloat32_t svrecpx_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x))) +svfloat16_t svrecpx_f16_x(svbool_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z))) +svfloat64_t svrecpx_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z))) +svfloat32_t svrecpx_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z))) +svfloat16_t svrecpx_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8))) +svuint8_t svrev_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32))) +svuint32_t svrev_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64))) +svuint64_t svrev_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16))) +svuint16_t svrev_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8))) +svint8_t svrev_s8(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64))) +svfloat64_t svrev_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32))) +svfloat32_t svrev_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16))) +svfloat16_t svrev_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32))) +svint32_t svrev_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64))) +svint64_t svrev_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16))) +svint16_t svrev_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b16))) +svbool_t svrev_b16(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b32))) +svbool_t svrev_b32(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b64))) +svbool_t svrev_b64(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b8))) +svbool_t svrev_b8(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m))) +svuint32_t svrevb_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m))) +svuint64_t svrevb_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m))) +svuint16_t svrevb_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m))) +svint32_t svrevb_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m))) +svint64_t svrevb_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m))) +svint16_t svrevb_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x))) +svuint32_t svrevb_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x))) +svuint64_t svrevb_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x))) +svuint16_t svrevb_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x))) +svint32_t svrevb_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x))) +svint64_t svrevb_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x))) +svint16_t svrevb_s16_x(svbool_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z))) +svuint32_t svrevb_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z))) +svuint64_t svrevb_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z))) +svuint16_t svrevb_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z))) +svint32_t svrevb_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z))) +svint64_t svrevb_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z))) +svint16_t svrevb_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m))) +svuint32_t svrevh_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m))) +svuint64_t svrevh_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m))) +svint32_t svrevh_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m))) +svint64_t svrevh_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x))) +svuint32_t svrevh_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x))) +svuint64_t svrevh_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x))) +svint32_t svrevh_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x))) +svint64_t svrevh_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z))) +svuint32_t svrevh_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z))) +svuint64_t svrevh_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z))) +svint32_t svrevh_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z))) +svint64_t svrevh_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m))) +svuint64_t svrevw_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m))) +svint64_t svrevw_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x))) +svuint64_t svrevw_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x))) +svint64_t svrevw_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z))) +svuint64_t svrevw_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z))) +svint64_t svrevw_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m))) +svfloat64_t svrinta_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m))) +svfloat32_t svrinta_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m))) +svfloat16_t svrinta_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x))) 
+svfloat64_t svrinta_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x))) +svfloat32_t svrinta_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x))) +svfloat16_t svrinta_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z))) +svfloat64_t svrinta_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z))) +svfloat32_t svrinta_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z))) +svfloat16_t svrinta_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m))) +svfloat64_t svrinti_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m))) +svfloat32_t svrinti_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m))) +svfloat16_t svrinti_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x))) +svfloat64_t svrinti_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x))) +svfloat32_t svrinti_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x))) +svfloat16_t svrinti_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z))) +svfloat64_t svrinti_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z))) +svfloat32_t svrinti_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z))) +svfloat16_t svrinti_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m))) +svfloat64_t svrintm_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m))) +svfloat32_t svrintm_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m))) +svfloat16_t svrintm_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x))) +svfloat64_t svrintm_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x))) +svfloat32_t svrintm_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x))) +svfloat16_t svrintm_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z))) +svfloat64_t svrintm_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z))) +svfloat32_t svrintm_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z))) +svfloat16_t svrintm_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m))) +svfloat64_t svrintn_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m))) +svfloat32_t svrintn_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m))) +svfloat16_t svrintn_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x))) +svfloat64_t svrintn_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x))) +svfloat32_t svrintn_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x))) +svfloat16_t svrintn_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z))) +svfloat64_t svrintn_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z))) +svfloat32_t svrintn_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z))) +svfloat16_t svrintn_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m))) +svfloat64_t svrintp_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m))) +svfloat32_t svrintp_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m))) +svfloat16_t svrintp_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x))) +svfloat64_t svrintp_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x))) +svfloat32_t svrintp_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x))) +svfloat16_t svrintp_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z))) +svfloat64_t svrintp_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z))) +svfloat32_t svrintp_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z))) +svfloat16_t svrintp_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m))) +svfloat64_t svrintx_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m))) +svfloat32_t svrintx_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m))) +svfloat16_t svrintx_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x))) +svfloat64_t svrintx_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x))) +svfloat32_t svrintx_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x))) +svfloat16_t svrintx_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z))) +svfloat64_t svrintx_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z))) +svfloat32_t svrintx_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z))) +svfloat16_t svrintx_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m))) +svfloat64_t svrintz_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m))) +svfloat32_t svrintz_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m))) +svfloat16_t 
svrintz_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x))) +svfloat64_t svrintz_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x))) +svfloat32_t svrintz_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x))) +svfloat16_t svrintz_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z))) +svfloat64_t svrintz_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z))) +svfloat32_t svrintz_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z))) +svfloat16_t svrintz_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64))) +svfloat64_t svrsqrte_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32))) +svfloat32_t svrsqrte_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16))) +svfloat16_t svrsqrte_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64))) +svfloat64_t svrsqrts_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32))) +svfloat32_t svrsqrts_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16))) +svfloat16_t svrsqrts_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m))) +svfloat64_t svscale_n_f64_m(svbool_t, svfloat64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m))) +svfloat32_t svscale_n_f32_m(svbool_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m))) +svfloat16_t svscale_n_f16_m(svbool_t, svfloat16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x))) +svfloat64_t svscale_n_f64_x(svbool_t, svfloat64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x))) +svfloat32_t svscale_n_f32_x(svbool_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x))) +svfloat16_t svscale_n_f16_x(svbool_t, svfloat16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z))) +svfloat64_t svscale_n_f64_z(svbool_t, svfloat64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z))) +svfloat32_t svscale_n_f32_z(svbool_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z))) +svfloat16_t svscale_n_f16_z(svbool_t, svfloat16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m))) +svfloat64_t svscale_f64_m(svbool_t, svfloat64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m))) +svfloat32_t svscale_f32_m(svbool_t, svfloat32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m))) +svfloat16_t svscale_f16_m(svbool_t, svfloat16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x))) +svfloat64_t svscale_f64_x(svbool_t, svfloat64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x))) +svfloat32_t svscale_f32_x(svbool_t, svfloat32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x))) +svfloat16_t svscale_f16_x(svbool_t, svfloat16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z))) +svfloat64_t svscale_f64_z(svbool_t, svfloat64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z))) +svfloat32_t svscale_f32_z(svbool_t, svfloat32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z))) +svfloat16_t svscale_f16_z(svbool_t, svfloat16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b))) +svbool_t svsel_b(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8))) +svuint8_t svsel_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32))) +svuint32_t svsel_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64))) +svuint64_t svsel_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16))) +svuint16_t svsel_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8))) +svint8_t svsel_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64))) +svfloat64_t svsel_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32))) +svfloat32_t svsel_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16))) +svfloat16_t svsel_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32))) +svint32_t svsel_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64))) +svint64_t svsel_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16))) +svint16_t svsel_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8))) +svuint8x2_t svset2_u8(svuint8x2_t, uint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32))) +svuint32x2_t svset2_u32(svuint32x2_t, uint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64))) +svuint64x2_t svset2_u64(svuint64x2_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16))) +svuint16x2_t svset2_u16(svuint16x2_t, uint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8))) +svint8x2_t svset2_s8(svint8x2_t, uint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64))) +svfloat64x2_t svset2_f64(svfloat64x2_t, uint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32))) +svfloat32x2_t svset2_f32(svfloat32x2_t, uint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16))) +svfloat16x2_t svset2_f16(svfloat16x2_t, uint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32))) +svint32x2_t svset2_s32(svint32x2_t, uint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64))) +svint64x2_t svset2_s64(svint64x2_t, uint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16))) +svint16x2_t 
svset2_s16(svint16x2_t, uint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8))) +svuint8x3_t svset3_u8(svuint8x3_t, uint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32))) +svuint32x3_t svset3_u32(svuint32x3_t, uint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64))) +svuint64x3_t svset3_u64(svuint64x3_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16))) +svuint16x3_t svset3_u16(svuint16x3_t, uint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8))) +svint8x3_t svset3_s8(svint8x3_t, uint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64))) +svfloat64x3_t svset3_f64(svfloat64x3_t, uint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32))) +svfloat32x3_t svset3_f32(svfloat32x3_t, uint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16))) +svfloat16x3_t svset3_f16(svfloat16x3_t, uint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32))) +svint32x3_t svset3_s32(svint32x3_t, uint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64))) +svint64x3_t svset3_s64(svint64x3_t, uint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16))) +svint16x3_t svset3_s16(svint16x3_t, uint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8))) +svuint8x4_t svset4_u8(svuint8x4_t, uint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32))) +svuint32x4_t svset4_u32(svuint32x4_t, uint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64))) +svuint64x4_t svset4_u64(svuint64x4_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16))) +svuint16x4_t svset4_u16(svuint16x4_t, uint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8))) +svint8x4_t svset4_s8(svint8x4_t, uint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64))) +svfloat64x4_t svset4_f64(svfloat64x4_t, uint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32))) +svfloat32x4_t svset4_f32(svfloat32x4_t, uint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16))) +svfloat16x4_t svset4_f16(svfloat16x4_t, uint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32))) +svint32x4_t svset4_s32(svint32x4_t, uint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64))) +svint64x4_t svset4_s64(svint64x4_t, uint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16))) +svint16x4_t svset4_s16(svint16x4_t, uint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8))) +svuint8_t svsplice_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32))) +svuint32_t svsplice_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64))) +svuint64_t svsplice_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16))) +svuint16_t svsplice_u16(svbool_t, 
svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8))) +svint8_t svsplice_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64))) +svfloat64_t svsplice_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32))) +svfloat32_t svsplice_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16))) +svfloat16_t svsplice_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32))) +svint32_t svsplice_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64))) +svint64_t svsplice_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16))) +svint16_t svsplice_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m))) +svfloat64_t svsqrt_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m))) +svfloat32_t svsqrt_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m))) +svfloat16_t svsqrt_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x))) +svfloat64_t svsqrt_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x))) +svfloat32_t svsqrt_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x))) +svfloat16_t svsqrt_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z))) +svfloat64_t svsqrt_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z))) +svfloat32_t svsqrt_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z))) +svfloat16_t svsqrt_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8))) +void svst1_u8(svbool_t, uint8_t *, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32))) +void svst1_u32(svbool_t, uint32_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64))) +void svst1_u64(svbool_t, uint64_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16))) +void svst1_u16(svbool_t, uint16_t *, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8))) +void svst1_s8(svbool_t, int8_t *, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64))) +void svst1_f64(svbool_t, float64_t *, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32))) +void svst1_f32(svbool_t, float32_t *, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16))) +void svst1_f16(svbool_t, float16_t *, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32))) +void svst1_s32(svbool_t, int32_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64))) +void svst1_s64(svbool_t, int64_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16))) +void svst1_s16(svbool_t, int16_t *, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8))) +void svst1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32))) +void svst1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64))) +void svst1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16))) +void svst1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8))) +void svst1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64))) +void svst1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32))) +void svst1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16))) +void svst1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32))) +void svst1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64))) +void svst1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16))) +void svst1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32))) +void svst1b_s32(svbool_t, int8_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64))) +void svst1b_s64(svbool_t, int8_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16))) +void svst1b_s16(svbool_t, int8_t *, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32))) +void svst1b_u32(svbool_t, uint8_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64))) +void svst1b_u64(svbool_t, uint8_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16))) +void svst1b_u16(svbool_t, uint8_t *, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32))) +void svst1b_vnum_s32(svbool_t, int8_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64))) +void svst1b_vnum_s64(svbool_t, int8_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16))) +void svst1b_vnum_s16(svbool_t, int8_t *, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32))) +void svst1b_vnum_u32(svbool_t, uint8_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64))) +void svst1b_vnum_u64(svbool_t, uint8_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16))) +void svst1b_vnum_u16(svbool_t, uint8_t *, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32))) +void svst1h_s32(svbool_t, int16_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64))) +void svst1h_s64(svbool_t, int16_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32))) +void svst1h_u32(svbool_t, uint16_t *, 
svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64))) +void svst1h_u64(svbool_t, uint16_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32))) +void svst1h_vnum_s32(svbool_t, int16_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64))) +void svst1h_vnum_s64(svbool_t, int16_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32))) +void svst1h_vnum_u32(svbool_t, uint16_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64))) +void svst1h_vnum_u64(svbool_t, uint16_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64))) +void svst1w_s64(svbool_t, int32_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64))) +void svst1w_u64(svbool_t, uint32_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64))) +void svst1w_vnum_s64(svbool_t, int32_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64))) +void svst1w_vnum_u64(svbool_t, uint32_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8))) +void svst2_u8(svbool_t, uint8_t *, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32))) +void svst2_u32(svbool_t, uint32_t *, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64))) +void svst2_u64(svbool_t, uint64_t *, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16))) +void svst2_u16(svbool_t, uint16_t *, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8))) +void svst2_s8(svbool_t, int8_t *, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64))) +void svst2_f64(svbool_t, float64_t *, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32))) +void svst2_f32(svbool_t, float32_t *, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16))) +void svst2_f16(svbool_t, float16_t *, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32))) +void svst2_s32(svbool_t, int32_t *, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64))) +void svst2_s64(svbool_t, int64_t *, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16))) +void svst2_s16(svbool_t, int16_t *, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8))) +void svst2_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32))) +void svst2_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64))) +void svst2_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16))) +void svst2_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8))) +void svst2_vnum_s8(svbool_t, int8_t *, int64_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64))) +void svst2_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x2_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32))) +void svst2_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16))) +void svst2_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32))) +void svst2_vnum_s32(svbool_t, int32_t *, int64_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64))) +void svst2_vnum_s64(svbool_t, int64_t *, int64_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16))) +void svst2_vnum_s16(svbool_t, int16_t *, int64_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8))) +void svst3_u8(svbool_t, uint8_t *, svuint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32))) +void svst3_u32(svbool_t, uint32_t *, svuint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64))) +void svst3_u64(svbool_t, uint64_t *, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16))) +void svst3_u16(svbool_t, uint16_t *, svuint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8))) +void svst3_s8(svbool_t, int8_t *, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64))) +void svst3_f64(svbool_t, float64_t *, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32))) +void svst3_f32(svbool_t, float32_t *, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16))) +void svst3_f16(svbool_t, float16_t *, svfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32))) +void svst3_s32(svbool_t, int32_t *, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64))) +void svst3_s64(svbool_t, int64_t *, svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16))) +void svst3_s16(svbool_t, int16_t *, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8))) +void svst3_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32))) +void svst3_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64))) +void svst3_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16))) +void svst3_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8))) +void svst3_vnum_s8(svbool_t, int8_t *, int64_t, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64))) +void svst3_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32))) +void svst3_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16))) +void svst3_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32))) +void svst3_vnum_s32(svbool_t, int32_t *, int64_t, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64))) +void svst3_vnum_s64(svbool_t, int64_t *, int64_t, 
svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16))) +void svst3_vnum_s16(svbool_t, int16_t *, int64_t, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8))) +void svst4_u8(svbool_t, uint8_t *, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32))) +void svst4_u32(svbool_t, uint32_t *, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64))) +void svst4_u64(svbool_t, uint64_t *, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16))) +void svst4_u16(svbool_t, uint16_t *, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8))) +void svst4_s8(svbool_t, int8_t *, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64))) +void svst4_f64(svbool_t, float64_t *, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32))) +void svst4_f32(svbool_t, float32_t *, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16))) +void svst4_f16(svbool_t, float16_t *, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32))) +void svst4_s32(svbool_t, int32_t *, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64))) +void svst4_s64(svbool_t, int64_t *, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16))) +void svst4_s16(svbool_t, int16_t *, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8))) +void svst4_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32))) +void svst4_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64))) +void svst4_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16))) +void svst4_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8))) +void svst4_vnum_s8(svbool_t, int8_t *, int64_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64))) +void svst4_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32))) +void svst4_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16))) +void svst4_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32))) +void svst4_vnum_s32(svbool_t, int32_t *, int64_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64))) +void svst4_vnum_s64(svbool_t, int64_t *, int64_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16))) +void svst4_vnum_s16(svbool_t, int16_t *, int64_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8))) +void svstnt1_u8(svbool_t, uint8_t *, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32))) +void svstnt1_u32(svbool_t, uint32_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64))) +void svstnt1_u64(svbool_t, uint64_t *, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16))) +void svstnt1_u16(svbool_t, uint16_t *, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8))) +void svstnt1_s8(svbool_t, int8_t *, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64))) +void svstnt1_f64(svbool_t, float64_t *, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32))) +void svstnt1_f32(svbool_t, float32_t *, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16))) +void svstnt1_f16(svbool_t, float16_t *, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32))) +void svstnt1_s32(svbool_t, int32_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64))) +void svstnt1_s64(svbool_t, int64_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16))) +void svstnt1_s16(svbool_t, int16_t *, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8))) +void svstnt1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32))) +void svstnt1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64))) +void svstnt1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16))) +void svstnt1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8))) +void svstnt1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64))) +void svstnt1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32))) +void svstnt1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16))) +void svstnt1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32))) +void svstnt1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64))) +void svstnt1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16))) +void svstnt1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m))) +svfloat64_t svsub_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m))) +svfloat32_t svsub_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m))) +svfloat16_t svsub_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x))) +svfloat64_t svsub_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x))) +svfloat32_t svsub_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x))) +svfloat16_t svsub_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z))) +svfloat64_t svsub_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z))) +svfloat32_t svsub_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z))) +svfloat16_t svsub_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m))) +svuint8_t svsub_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m))) +svuint32_t svsub_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m))) +svuint64_t svsub_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m))) +svuint16_t svsub_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m))) +svint8_t svsub_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m))) +svint32_t svsub_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m))) +svint64_t svsub_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m))) +svint16_t svsub_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x))) +svuint8_t svsub_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x))) +svuint32_t svsub_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x))) +svuint64_t svsub_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x))) +svuint16_t svsub_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x))) +svint8_t svsub_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x))) +svint32_t svsub_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x))) +svint64_t svsub_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x))) +svint16_t svsub_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z))) +svuint8_t svsub_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z))) +svuint32_t svsub_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z))) +svuint64_t svsub_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z))) +svuint16_t svsub_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z))) +svint8_t svsub_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z))) +svint32_t svsub_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z))) +svint64_t svsub_n_s64_z(svbool_t, svint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z))) +svint16_t svsub_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m))) +svfloat64_t svsub_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m))) +svfloat32_t svsub_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m))) +svfloat16_t svsub_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x))) +svfloat64_t svsub_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x))) +svfloat32_t svsub_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x))) +svfloat16_t svsub_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z))) +svfloat64_t svsub_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z))) +svfloat32_t svsub_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z))) +svfloat16_t svsub_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m))) +svuint8_t svsub_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m))) +svuint32_t svsub_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m))) +svuint64_t svsub_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m))) +svuint16_t svsub_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m))) +svint8_t svsub_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m))) +svint32_t svsub_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m))) +svint64_t svsub_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m))) +svint16_t svsub_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x))) +svuint8_t svsub_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x))) +svuint32_t svsub_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x))) +svuint64_t svsub_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x))) +svuint16_t svsub_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x))) +svint8_t svsub_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x))) +svint32_t svsub_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x))) +svint64_t svsub_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x))) +svint16_t svsub_s16_x(svbool_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z))) +svuint8_t svsub_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z))) +svuint32_t svsub_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z))) +svuint64_t svsub_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z))) +svuint16_t svsub_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z))) +svint8_t svsub_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z))) +svint32_t svsub_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z))) +svint64_t svsub_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z))) +svint16_t svsub_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m))) +svfloat64_t svsubr_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m))) +svfloat32_t svsubr_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m))) +svfloat16_t svsubr_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x))) +svfloat64_t svsubr_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x))) +svfloat32_t svsubr_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x))) +svfloat16_t svsubr_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z))) +svfloat64_t svsubr_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z))) +svfloat32_t svsubr_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z))) +svfloat16_t svsubr_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m))) +svuint8_t svsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m))) +svuint32_t svsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m))) +svuint64_t svsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m))) +svuint16_t svsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m))) +svint8_t svsubr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m))) +svint32_t svsubr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m))) +svint64_t svsubr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m))) +svint16_t svsubr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x))) +svuint8_t svsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x))) +svuint32_t svsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x))) +svuint64_t svsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x))) +svuint16_t svsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x))) +svint8_t svsubr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x))) +svint32_t svsubr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x))) +svint64_t svsubr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x))) +svint16_t svsubr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z))) +svuint8_t svsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z))) +svuint32_t svsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z))) +svuint64_t svsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z))) +svuint16_t svsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z))) +svint8_t svsubr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z))) +svint32_t svsubr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z))) +svint64_t svsubr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z))) +svint16_t svsubr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m))) +svfloat64_t svsubr_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m))) +svfloat32_t svsubr_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m))) +svfloat16_t svsubr_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x))) +svfloat64_t svsubr_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x))) +svfloat32_t svsubr_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x))) +svfloat16_t svsubr_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z))) +svfloat64_t svsubr_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z))) +svfloat32_t svsubr_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z))) +svfloat16_t svsubr_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m))) +svuint8_t svsubr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m))) +svuint32_t svsubr_u32_m(svbool_t, 
svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m))) +svuint64_t svsubr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m))) +svuint16_t svsubr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m))) +svint8_t svsubr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m))) +svint32_t svsubr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m))) +svint64_t svsubr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m))) +svint16_t svsubr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x))) +svuint8_t svsubr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x))) +svuint32_t svsubr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x))) +svuint64_t svsubr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x))) +svuint16_t svsubr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x))) +svint8_t svsubr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x))) +svint32_t svsubr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x))) +svint64_t svsubr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x))) +svint16_t svsubr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z))) +svuint8_t svsubr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z))) +svuint32_t svsubr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z))) +svuint64_t svsubr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z))) +svuint16_t svsubr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z))) +svint8_t svsubr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z))) +svint32_t svsubr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z))) +svint64_t svsubr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z))) +svint16_t svsubr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8))) +svuint8_t svtbl_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32))) +svuint32_t svtbl_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64))) +svuint64_t svtbl_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16))) +svuint16_t svtbl_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8))) +svint8_t 
svtbl_s8(svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64))) +svfloat64_t svtbl_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32))) +svfloat32_t svtbl_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16))) +svfloat16_t svtbl_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32))) +svint32_t svtbl_s32(svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64))) +svint64_t svtbl_s64(svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16))) +svint16_t svtbl_s16(svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8))) +svuint8_t svtrn1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32))) +svuint32_t svtrn1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64))) +svuint64_t svtrn1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16))) +svuint16_t svtrn1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8))) +svint8_t svtrn1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64))) +svfloat64_t svtrn1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32))) +svfloat32_t svtrn1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16))) +svfloat16_t svtrn1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32))) +svint32_t svtrn1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64))) +svint64_t svtrn1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16))) +svint16_t svtrn1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b16))) +svbool_t svtrn1_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b32))) +svbool_t svtrn1_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b64))) +svbool_t svtrn1_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b8))) +svbool_t svtrn1_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8))) +svuint8_t svtrn2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32))) +svuint32_t svtrn2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64))) +svuint64_t svtrn2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16))) +svuint16_t svtrn2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8))) +svint8_t svtrn2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64))) +svfloat64_t svtrn2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32))) +svfloat32_t svtrn2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16))) +svfloat16_t svtrn2_f16(svfloat16_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32))) +svint32_t svtrn2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64))) +svint64_t svtrn2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16))) +svint16_t svtrn2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b16))) +svbool_t svtrn2_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b32))) +svbool_t svtrn2_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b64))) +svbool_t svtrn2_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b8))) +svbool_t svtrn2_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u8))) +svuint8x2_t svundef2_u8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u32))) +svuint32x2_t svundef2_u32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u64))) +svuint64x2_t svundef2_u64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u16))) +svuint16x2_t svundef2_u16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s8))) +svint8x2_t svundef2_s8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f64))) +svfloat64x2_t svundef2_f64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f32))) +svfloat32x2_t svundef2_f32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f16))) +svfloat16x2_t svundef2_f16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s32))) +svint32x2_t svundef2_s32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s64))) +svint64x2_t svundef2_s64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s16))) +svint16x2_t svundef2_s16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u8))) +svuint8x3_t svundef3_u8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u32))) +svuint32x3_t svundef3_u32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u64))) +svuint64x3_t svundef3_u64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u16))) +svuint16x3_t svundef3_u16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s8))) +svint8x3_t svundef3_s8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f64))) +svfloat64x3_t svundef3_f64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f32))) +svfloat32x3_t svundef3_f32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f16))) +svfloat16x3_t svundef3_f16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s32))) +svint32x3_t svundef3_s32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s64))) +svint64x3_t svundef3_s64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s16))) +svint16x3_t svundef3_s16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u8))) +svuint8x4_t svundef4_u8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u32))) +svuint32x4_t svundef4_u32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u64))) 
+svuint64x4_t svundef4_u64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u16))) +svuint16x4_t svundef4_u16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s8))) +svint8x4_t svundef4_s8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f64))) +svfloat64x4_t svundef4_f64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f32))) +svfloat32x4_t svundef4_f32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f16))) +svfloat16x4_t svundef4_f16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s32))) +svint32x4_t svundef4_s32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s64))) +svint64x4_t svundef4_s64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s16))) +svint16x4_t svundef4_s16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u8))) +svuint8_t svundef_u8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u32))) +svuint32_t svundef_u32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u64))) +svuint64_t svundef_u64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u16))) +svuint16_t svundef_u16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s8))) +svint8_t svundef_s8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f64))) +svfloat64_t svundef_f64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f32))) +svfloat32_t svundef_f32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f16))) +svfloat16_t svundef_f16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s32))) +svint32_t svundef_s32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s64))) +svint64_t svundef_s64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s16))) +svint16_t svundef_s16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b))) +svbool_t svunpkhi_b(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32))) +svint32_t svunpkhi_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64))) +svint64_t svunpkhi_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16))) +svint16_t svunpkhi_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32))) +svuint32_t svunpkhi_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64))) +svuint64_t svunpkhi_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16))) +svuint16_t svunpkhi_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b))) +svbool_t svunpklo_b(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32))) +svint32_t svunpklo_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64))) +svint64_t svunpklo_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16))) +svint16_t svunpklo_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32))) +svuint32_t svunpklo_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64))) +svuint64_t svunpklo_u64(svuint32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16))) +svuint16_t svunpklo_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8))) +svuint8_t svuzp1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32))) +svuint32_t svuzp1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64))) +svuint64_t svuzp1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16))) +svuint16_t svuzp1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8))) +svint8_t svuzp1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64))) +svfloat64_t svuzp1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32))) +svfloat32_t svuzp1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16))) +svfloat16_t svuzp1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32))) +svint32_t svuzp1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64))) +svint64_t svuzp1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16))) +svint16_t svuzp1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b16))) +svbool_t svuzp1_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b32))) +svbool_t svuzp1_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b64))) +svbool_t svuzp1_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b8))) +svbool_t svuzp1_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8))) +svuint8_t svuzp2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32))) +svuint32_t svuzp2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64))) +svuint64_t svuzp2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16))) +svuint16_t svuzp2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8))) +svint8_t svuzp2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64))) +svfloat64_t svuzp2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32))) +svfloat32_t svuzp2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16))) +svfloat16_t svuzp2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32))) +svint32_t svuzp2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64))) +svint64_t svuzp2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16))) +svint16_t svuzp2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b16))) +svbool_t svuzp2_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b32))) +svbool_t svuzp2_b32(svbool_t, svbool_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b64))) +svbool_t svuzp2_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b8))) +svbool_t svuzp2_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32))) +svbool_t svwhilele_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32))) +svbool_t svwhilele_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32))) +svbool_t svwhilele_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32))) +svbool_t svwhilele_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64))) +svbool_t svwhilele_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64))) +svbool_t svwhilele_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64))) +svbool_t svwhilele_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64))) +svbool_t svwhilele_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32))) +svbool_t svwhilele_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32))) +svbool_t svwhilele_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32))) +svbool_t svwhilele_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32))) +svbool_t svwhilele_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64))) +svbool_t svwhilele_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64))) +svbool_t svwhilele_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64))) +svbool_t svwhilele_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64))) +svbool_t svwhilele_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32))) +svbool_t svwhilelt_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32))) +svbool_t svwhilelt_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32))) +svbool_t svwhilelt_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32))) +svbool_t svwhilelt_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64))) +svbool_t svwhilelt_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64))) +svbool_t svwhilelt_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64))) +svbool_t svwhilelt_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64))) +svbool_t svwhilelt_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32))) +svbool_t svwhilelt_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32))) 
+svbool_t svwhilelt_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32))) +svbool_t svwhilelt_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32))) +svbool_t svwhilelt_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64))) +svbool_t svwhilelt_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64))) +svbool_t svwhilelt_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64))) +svbool_t svwhilelt_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64))) +svbool_t svwhilelt_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8))) +svuint8_t svzip1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32))) +svuint32_t svzip1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64))) +svuint64_t svzip1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16))) +svuint16_t svzip1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8))) +svint8_t svzip1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64))) +svfloat64_t svzip1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32))) +svfloat32_t svzip1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16))) +svfloat16_t svzip1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32))) +svint32_t svzip1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64))) +svint64_t svzip1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16))) +svint16_t svzip1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b16))) +svbool_t svzip1_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b32))) +svbool_t svzip1_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b64))) +svbool_t svzip1_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b8))) +svbool_t svzip1_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8))) +svuint8_t svzip2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32))) +svuint32_t svzip2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64))) +svuint64_t svzip2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16))) +svuint16_t svzip2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8))) +svint8_t svzip2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64))) +svfloat64_t svzip2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32))) +svfloat32_t svzip2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16))) 
+svfloat16_t svzip2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32))) +svint32_t svzip2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64))) +svint64_t svzip2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16))) +svint16_t svzip2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b16))) +svbool_t svzip2_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b32))) +svbool_t svzip2_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b64))) +svbool_t svzip2_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b8))) +svbool_t svzip2_b8(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m))) +svfloat64_t svabd_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m))) +svfloat32_t svabd_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m))) +svfloat16_t svabd_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x))) +svfloat64_t svabd_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x))) +svfloat32_t svabd_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x))) +svfloat16_t svabd_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z))) +svfloat64_t svabd_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z))) +svfloat32_t svabd_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z))) +svfloat16_t svabd_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m))) +svint8_t svabd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m))) +svint32_t svabd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m))) +svint64_t svabd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m))) +svint16_t svabd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x))) +svint8_t svabd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x))) +svint32_t svabd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x))) +svint64_t svabd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x))) +svint16_t svabd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z))) +svint8_t svabd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z))) +svint32_t svabd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z))) +svint64_t svabd_z(svbool_t, svint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z))) +svint16_t svabd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m))) +svuint8_t svabd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m))) +svuint32_t svabd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m))) +svuint64_t svabd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m))) +svuint16_t svabd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x))) +svuint8_t svabd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x))) +svuint32_t svabd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x))) +svuint64_t svabd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x))) +svuint16_t svabd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z))) +svuint8_t svabd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z))) +svuint32_t svabd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z))) +svuint64_t svabd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z))) +svuint16_t svabd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m))) +svfloat64_t svabd_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m))) +svfloat32_t svabd_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m))) +svfloat16_t svabd_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x))) +svfloat64_t svabd_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x))) +svfloat32_t svabd_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x))) +svfloat16_t svabd_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z))) +svfloat64_t svabd_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z))) +svfloat32_t svabd_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z))) +svfloat16_t svabd_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m))) +svint8_t svabd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m))) +svint32_t svabd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m))) +svint64_t svabd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m))) +svint16_t svabd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x))) +svint8_t svabd_x(svbool_t, svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x))) +svint32_t svabd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x))) +svint64_t svabd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x))) +svint16_t svabd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z))) +svint8_t svabd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z))) +svint32_t svabd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z))) +svint64_t svabd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z))) +svint16_t svabd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m))) +svuint8_t svabd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m))) +svuint32_t svabd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m))) +svuint64_t svabd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m))) +svuint16_t svabd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x))) +svuint8_t svabd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x))) +svuint32_t svabd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x))) +svuint64_t svabd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x))) +svuint16_t svabd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z))) +svuint8_t svabd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z))) +svuint32_t svabd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z))) +svuint64_t svabd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z))) +svuint16_t svabd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m))) +svfloat64_t svabs_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m))) +svfloat32_t svabs_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m))) +svfloat16_t svabs_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x))) +svfloat64_t svabs_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x))) +svfloat32_t svabs_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x))) +svfloat16_t svabs_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z))) +svfloat64_t svabs_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z))) +svfloat32_t svabs_z(svbool_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z))) +svfloat16_t svabs_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m))) +svint8_t svabs_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m))) +svint32_t svabs_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m))) +svint64_t svabs_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m))) +svint16_t svabs_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x))) +svint8_t svabs_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x))) +svint32_t svabs_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x))) +svint64_t svabs_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x))) +svint16_t svabs_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z))) +svint8_t svabs_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z))) +svint32_t svabs_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z))) +svint64_t svabs_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z))) +svint16_t svabs_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64))) +svbool_t svacge(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32))) +svbool_t svacge(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16))) +svbool_t svacge(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64))) +svbool_t svacge(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32))) +svbool_t svacge(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16))) +svbool_t svacge(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64))) +svbool_t svacgt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32))) +svbool_t svacgt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16))) +svbool_t svacgt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64))) +svbool_t svacgt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32))) +svbool_t svacgt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16))) +svbool_t svacgt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64))) +svbool_t svacle(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32))) +svbool_t svacle(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16))) +svbool_t svacle(svbool_t, svfloat16_t, float16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64))) +svbool_t svacle(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32))) +svbool_t svacle(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16))) +svbool_t svacle(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64))) +svbool_t svaclt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32))) +svbool_t svaclt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16))) +svbool_t svaclt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64))) +svbool_t svaclt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32))) +svbool_t svaclt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16))) +svbool_t svaclt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m))) +svfloat64_t svadd_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m))) +svfloat32_t svadd_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m))) +svfloat16_t svadd_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x))) +svfloat64_t svadd_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x))) +svfloat32_t svadd_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x))) +svfloat16_t svadd_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z))) +svfloat64_t svadd_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z))) +svfloat32_t svadd_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z))) +svfloat16_t svadd_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m))) +svuint8_t svadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m))) +svuint32_t svadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m))) +svuint64_t svadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m))) +svuint16_t svadd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m))) +svint8_t svadd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m))) +svint32_t svadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m))) +svint64_t svadd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m))) +svint16_t svadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x))) +svuint8_t svadd_x(svbool_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x))) +svuint32_t svadd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x))) +svuint64_t svadd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x))) +svuint16_t svadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x))) +svint8_t svadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x))) +svint32_t svadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x))) +svint64_t svadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x))) +svint16_t svadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z))) +svuint8_t svadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z))) +svuint32_t svadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z))) +svuint64_t svadd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z))) +svuint16_t svadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z))) +svint8_t svadd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z))) +svint32_t svadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z))) +svint64_t svadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z))) +svint16_t svadd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m))) +svfloat64_t svadd_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m))) +svfloat32_t svadd_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m))) +svfloat16_t svadd_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x))) +svfloat64_t svadd_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x))) +svfloat32_t svadd_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x))) +svfloat16_t svadd_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z))) +svfloat64_t svadd_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z))) +svfloat32_t svadd_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z))) +svfloat16_t svadd_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m))) +svuint8_t svadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m))) +svuint32_t svadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m))) +svuint64_t svadd_m(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m))) +svuint16_t svadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m))) +svint8_t svadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m))) +svint32_t svadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m))) +svint64_t svadd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m))) +svint16_t svadd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x))) +svuint8_t svadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x))) +svuint32_t svadd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x))) +svuint64_t svadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x))) +svuint16_t svadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x))) +svint8_t svadd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x))) +svint32_t svadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x))) +svint64_t svadd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x))) +svint16_t svadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z))) +svuint8_t svadd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z))) +svuint32_t svadd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z))) +svuint64_t svadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z))) +svuint16_t svadd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z))) +svint8_t svadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z))) +svint32_t svadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z))) +svint64_t svadd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z))) +svint16_t svadd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64))) +float64_t svadda(svbool_t, float64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32))) +float32_t svadda(svbool_t, float32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16))) +float16_t svadda(svbool_t, float16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8))) +int64_t svaddv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32))) +int64_t svaddv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64))) +int64_t svaddv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16))) +int64_t svaddv(svbool_t, svint16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8))) +uint64_t svaddv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32))) +uint64_t svaddv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64))) +uint64_t svaddv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16))) +uint64_t svaddv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64))) +float64_t svaddv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32))) +float32_t svaddv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16))) +float16_t svaddv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z))) +svbool_t svand_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m))) +svuint8_t svand_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m))) +svuint32_t svand_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m))) +svuint64_t svand_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m))) +svuint16_t svand_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m))) +svint8_t svand_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m))) +svint32_t svand_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m))) +svint64_t svand_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m))) +svint16_t svand_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x))) +svuint8_t svand_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x))) +svuint32_t svand_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x))) +svuint64_t svand_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x))) +svuint16_t svand_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x))) +svint8_t svand_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x))) +svint32_t svand_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x))) +svint64_t svand_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x))) +svint16_t svand_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z))) +svuint8_t svand_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z))) +svuint32_t svand_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z))) +svuint64_t svand_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z))) +svuint16_t svand_z(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z))) +svint8_t svand_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z))) +svint32_t svand_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z))) +svint64_t svand_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z))) +svint16_t svand_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m))) +svuint8_t svand_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m))) +svuint32_t svand_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m))) +svuint64_t svand_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m))) +svuint16_t svand_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m))) +svint8_t svand_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m))) +svint32_t svand_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m))) +svint64_t svand_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m))) +svint16_t svand_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x))) +svuint8_t svand_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x))) +svuint32_t svand_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x))) +svuint64_t svand_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x))) +svuint16_t svand_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x))) +svint8_t svand_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x))) +svint32_t svand_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x))) +svint64_t svand_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x))) +svint16_t svand_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z))) +svuint8_t svand_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z))) +svuint32_t svand_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z))) +svuint64_t svand_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z))) +svuint16_t svand_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z))) +svint8_t svand_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z))) +svint32_t svand_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z))) +svint64_t svand_z(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z))) +svint16_t svand_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8))) +uint8_t svandv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32))) +uint32_t svandv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64))) +uint64_t svandv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16))) +uint16_t svandv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8))) +int8_t svandv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32))) +int32_t svandv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64))) +int64_t svandv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16))) +int16_t svandv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m))) +svint8_t svasr_m(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m))) +svint32_t svasr_m(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m))) +svint64_t svasr_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m))) +svint16_t svasr_m(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x))) +svint8_t svasr_x(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x))) +svint32_t svasr_x(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x))) +svint64_t svasr_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x))) +svint16_t svasr_x(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z))) +svint8_t svasr_z(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z))) +svint32_t svasr_z(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z))) +svint64_t svasr_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z))) +svint16_t svasr_z(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m))) +svint8_t svasr_m(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m))) +svint32_t svasr_m(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m))) +svint64_t svasr_m(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m))) +svint16_t svasr_m(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x))) +svint8_t svasr_x(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x))) +svint32_t svasr_x(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x))) +svint64_t svasr_x(svbool_t, svint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x))) +svint16_t svasr_x(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z))) +svint8_t svasr_z(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z))) +svint32_t svasr_z(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z))) +svint64_t svasr_z(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z))) +svint16_t svasr_z(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m))) +svint8_t svasr_wide_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m))) +svint32_t svasr_wide_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m))) +svint16_t svasr_wide_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x))) +svint8_t svasr_wide_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x))) +svint32_t svasr_wide_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x))) +svint16_t svasr_wide_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z))) +svint8_t svasr_wide_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z))) +svint32_t svasr_wide_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z))) +svint16_t svasr_wide_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m))) +svint8_t svasr_wide_m(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m))) +svint32_t svasr_wide_m(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m))) +svint16_t svasr_wide_m(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x))) +svint8_t svasr_wide_x(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x))) +svint32_t svasr_wide_x(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x))) +svint16_t svasr_wide_x(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z))) +svint8_t svasr_wide_z(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z))) +svint32_t svasr_wide_z(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z))) +svint16_t svasr_wide_z(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m))) +svint8_t svasrd_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m))) +svint32_t svasrd_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m))) +svint64_t svasrd_m(svbool_t, svint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m))) +svint16_t svasrd_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x))) +svint8_t svasrd_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x))) +svint32_t svasrd_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x))) +svint64_t svasrd_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x))) +svint16_t svasrd_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z))) +svint8_t svasrd_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z))) +svint32_t svasrd_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z))) +svint64_t svasrd_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z))) +svint16_t svasrd_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z))) +svbool_t svbic_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m))) +svuint8_t svbic_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m))) +svuint32_t svbic_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m))) +svuint64_t svbic_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m))) +svuint16_t svbic_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m))) +svint8_t svbic_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m))) +svint32_t svbic_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m))) +svint64_t svbic_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m))) +svint16_t svbic_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x))) +svuint8_t svbic_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x))) +svuint32_t svbic_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x))) +svuint64_t svbic_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x))) +svuint16_t svbic_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x))) +svint8_t svbic_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x))) +svint32_t svbic_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x))) +svint64_t svbic_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x))) +svint16_t svbic_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z))) +svuint8_t svbic_z(svbool_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z))) +svuint32_t svbic_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z))) +svuint64_t svbic_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z))) +svuint16_t svbic_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z))) +svint8_t svbic_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z))) +svint32_t svbic_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z))) +svint64_t svbic_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z))) +svint16_t svbic_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m))) +svuint8_t svbic_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m))) +svuint32_t svbic_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m))) +svuint64_t svbic_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m))) +svuint16_t svbic_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m))) +svint8_t svbic_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m))) +svint32_t svbic_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m))) +svint64_t svbic_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m))) +svint16_t svbic_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x))) +svuint8_t svbic_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x))) +svuint32_t svbic_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x))) +svuint64_t svbic_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x))) +svuint16_t svbic_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x))) +svint8_t svbic_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x))) +svint32_t svbic_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x))) +svint64_t svbic_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x))) +svint16_t svbic_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z))) +svuint8_t svbic_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z))) +svuint32_t svbic_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z))) +svuint64_t svbic_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z))) +svuint16_t svbic_z(svbool_t, svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z))) +svint8_t svbic_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z))) +svint32_t svbic_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z))) +svint64_t svbic_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z))) +svint16_t svbic_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m))) +svbool_t svbrka_m(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z))) +svbool_t svbrka_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m))) +svbool_t svbrkb_m(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z))) +svbool_t svbrkb_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z))) +svbool_t svbrkn_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z))) +svbool_t svbrkpa_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z))) +svbool_t svbrkpb_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m))) +svfloat64_t svcadd_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m))) +svfloat32_t svcadd_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m))) +svfloat16_t svcadd_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x))) +svfloat64_t svcadd_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x))) +svfloat32_t svcadd_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x))) +svfloat16_t svcadd_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z))) +svfloat64_t svcadd_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z))) +svfloat32_t svcadd_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z))) +svfloat16_t svcadd_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8))) +uint8_t svclasta(svbool_t, uint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32))) +uint32_t svclasta(svbool_t, uint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64))) +uint64_t svclasta(svbool_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16))) +uint16_t svclasta(svbool_t, uint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8))) +int8_t svclasta(svbool_t, int8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64))) +float64_t svclasta(svbool_t, float64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32))) +float32_t svclasta(svbool_t, float32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16))) +float16_t svclasta(svbool_t, float16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32))) +int32_t svclasta(svbool_t, int32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64))) +int64_t svclasta(svbool_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16))) +int16_t svclasta(svbool_t, int16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8))) +svuint8_t svclasta(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32))) +svuint32_t svclasta(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64))) +svuint64_t svclasta(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16))) +svuint16_t svclasta(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8))) +svint8_t svclasta(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64))) +svfloat64_t svclasta(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32))) +svfloat32_t svclasta(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16))) +svfloat16_t svclasta(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32))) +svint32_t svclasta(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64))) +svint64_t svclasta(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16))) +svint16_t svclasta(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8))) +uint8_t svclastb(svbool_t, uint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32))) +uint32_t svclastb(svbool_t, uint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64))) +uint64_t svclastb(svbool_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16))) +uint16_t svclastb(svbool_t, uint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8))) +int8_t svclastb(svbool_t, int8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64))) +float64_t svclastb(svbool_t, float64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32))) +float32_t svclastb(svbool_t, float32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16))) +float16_t svclastb(svbool_t, float16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32))) +int32_t svclastb(svbool_t, int32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64))) +int64_t svclastb(svbool_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16))) +int16_t svclastb(svbool_t, int16_t, svint16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8))) +svuint8_t svclastb(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32))) +svuint32_t svclastb(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64))) +svuint64_t svclastb(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16))) +svuint16_t svclastb(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8))) +svint8_t svclastb(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64))) +svfloat64_t svclastb(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32))) +svfloat32_t svclastb(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16))) +svfloat16_t svclastb(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32))) +svint32_t svclastb(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64))) +svint64_t svclastb(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16))) +svint16_t svclastb(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m))) +svuint8_t svcls_m(svuint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m))) +svuint32_t svcls_m(svuint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m))) +svuint64_t svcls_m(svuint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m))) +svuint16_t svcls_m(svuint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x))) +svuint8_t svcls_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x))) +svuint32_t svcls_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x))) +svuint64_t svcls_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x))) +svuint16_t svcls_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z))) +svuint8_t svcls_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z))) +svuint32_t svcls_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z))) +svuint64_t svcls_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z))) +svuint16_t svcls_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m))) +svuint8_t svclz_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m))) +svuint32_t svclz_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m))) +svuint64_t svclz_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m))) +svuint16_t svclz_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m))) +svuint8_t 
svclz_m(svuint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m))) +svuint32_t svclz_m(svuint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m))) +svuint64_t svclz_m(svuint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m))) +svuint16_t svclz_m(svuint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x))) +svuint8_t svclz_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x))) +svuint32_t svclz_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x))) +svuint64_t svclz_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x))) +svuint16_t svclz_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x))) +svuint8_t svclz_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x))) +svuint32_t svclz_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x))) +svuint64_t svclz_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x))) +svuint16_t svclz_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z))) +svuint8_t svclz_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z))) +svuint32_t svclz_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z))) +svuint64_t svclz_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z))) +svuint16_t svclz_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z))) +svuint8_t svclz_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z))) +svuint32_t svclz_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z))) +svuint64_t svclz_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z))) +svuint16_t svclz_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m))) +svfloat64_t svcmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m))) +svfloat32_t svcmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m))) +svfloat16_t svcmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x))) +svfloat64_t svcmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x))) +svfloat32_t svcmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x))) +svfloat16_t svcmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z))) +svfloat64_t svcmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z))) +svfloat32_t 
svcmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z))) +svfloat16_t svcmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32))) +svfloat32_t svcmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16))) +svfloat16_t svcmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64))) +svbool_t svcmpeq(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32))) +svbool_t svcmpeq(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16))) +svbool_t svcmpeq(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8))) +svbool_t svcmpeq(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32))) +svbool_t svcmpeq(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64))) +svbool_t svcmpeq(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16))) +svbool_t svcmpeq(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8))) +svbool_t svcmpeq(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32))) +svbool_t svcmpeq(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64))) +svbool_t svcmpeq(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16))) +svbool_t svcmpeq(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8))) +svbool_t svcmpeq(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32))) +svbool_t svcmpeq(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64))) +svbool_t svcmpeq(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16))) +svbool_t svcmpeq(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8))) +svbool_t svcmpeq(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32))) +svbool_t svcmpeq(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64))) +svbool_t svcmpeq(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16))) +svbool_t svcmpeq(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64))) +svbool_t svcmpeq(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32))) +svbool_t svcmpeq(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16))) +svbool_t svcmpeq(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8))) +svbool_t svcmpeq_wide(svbool_t, svint8_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32))) +svbool_t svcmpeq_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16))) +svbool_t svcmpeq_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8))) +svbool_t svcmpeq_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32))) +svbool_t svcmpeq_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16))) +svbool_t svcmpeq_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64))) +svbool_t svcmpge(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32))) +svbool_t svcmpge(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16))) +svbool_t svcmpge(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8))) +svbool_t svcmpge(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32))) +svbool_t svcmpge(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64))) +svbool_t svcmpge(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16))) +svbool_t svcmpge(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8))) +svbool_t svcmpge(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32))) +svbool_t svcmpge(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64))) +svbool_t svcmpge(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16))) +svbool_t svcmpge(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8))) +svbool_t svcmpge(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32))) +svbool_t svcmpge(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64))) +svbool_t svcmpge(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16))) +svbool_t svcmpge(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64))) +svbool_t svcmpge(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32))) +svbool_t svcmpge(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16))) +svbool_t svcmpge(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8))) +svbool_t svcmpge(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32))) +svbool_t svcmpge(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64))) +svbool_t svcmpge(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16))) +svbool_t svcmpge(svbool_t, svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8))) +svbool_t svcmpge_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32))) +svbool_t svcmpge_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16))) +svbool_t svcmpge_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8))) +svbool_t svcmpge_wide(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32))) +svbool_t svcmpge_wide(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16))) +svbool_t svcmpge_wide(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8))) +svbool_t svcmpge_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32))) +svbool_t svcmpge_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16))) +svbool_t svcmpge_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8))) +svbool_t svcmpge_wide(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32))) +svbool_t svcmpge_wide(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16))) +svbool_t svcmpge_wide(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64))) +svbool_t svcmpgt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32))) +svbool_t svcmpgt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16))) +svbool_t svcmpgt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8))) +svbool_t svcmpgt(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32))) +svbool_t svcmpgt(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64))) +svbool_t svcmpgt(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16))) +svbool_t svcmpgt(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8))) +svbool_t svcmpgt(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32))) +svbool_t svcmpgt(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64))) +svbool_t svcmpgt(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16))) +svbool_t svcmpgt(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8))) +svbool_t svcmpgt(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32))) +svbool_t svcmpgt(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64))) +svbool_t svcmpgt(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16))) +svbool_t 
svcmpgt(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64))) +svbool_t svcmpgt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32))) +svbool_t svcmpgt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16))) +svbool_t svcmpgt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8))) +svbool_t svcmpgt(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32))) +svbool_t svcmpgt(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64))) +svbool_t svcmpgt(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16))) +svbool_t svcmpgt(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8))) +svbool_t svcmpgt_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32))) +svbool_t svcmpgt_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16))) +svbool_t svcmpgt_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8))) +svbool_t svcmpgt_wide(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32))) +svbool_t svcmpgt_wide(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16))) +svbool_t svcmpgt_wide(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8))) +svbool_t svcmpgt_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32))) +svbool_t svcmpgt_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16))) +svbool_t svcmpgt_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8))) +svbool_t svcmpgt_wide(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32))) +svbool_t svcmpgt_wide(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16))) +svbool_t svcmpgt_wide(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64))) +svbool_t svcmple(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32))) +svbool_t svcmple(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16))) +svbool_t svcmple(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8))) +svbool_t svcmple(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32))) +svbool_t svcmple(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64))) +svbool_t svcmple(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16))) +svbool_t svcmple(svbool_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8))) +svbool_t svcmple(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32))) +svbool_t svcmple(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64))) +svbool_t svcmple(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16))) +svbool_t svcmple(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8))) +svbool_t svcmple(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32))) +svbool_t svcmple(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64))) +svbool_t svcmple(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16))) +svbool_t svcmple(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64))) +svbool_t svcmple(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32))) +svbool_t svcmple(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16))) +svbool_t svcmple(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8))) +svbool_t svcmple(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32))) +svbool_t svcmple(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64))) +svbool_t svcmple(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16))) +svbool_t svcmple(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8))) +svbool_t svcmple_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32))) +svbool_t svcmple_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16))) +svbool_t svcmple_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8))) +svbool_t svcmple_wide(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32))) +svbool_t svcmple_wide(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16))) +svbool_t svcmple_wide(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8))) +svbool_t svcmple_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32))) +svbool_t svcmple_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16))) +svbool_t svcmple_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8))) +svbool_t svcmple_wide(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32))) +svbool_t svcmple_wide(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16))) +svbool_t 
svcmple_wide(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8))) +svbool_t svcmplt(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32))) +svbool_t svcmplt(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64))) +svbool_t svcmplt(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16))) +svbool_t svcmplt(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64))) +svbool_t svcmplt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32))) +svbool_t svcmplt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16))) +svbool_t svcmplt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8))) +svbool_t svcmplt(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32))) +svbool_t svcmplt(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64))) +svbool_t svcmplt(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16))) +svbool_t svcmplt(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8))) +svbool_t svcmplt(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32))) +svbool_t svcmplt(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64))) +svbool_t svcmplt(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16))) +svbool_t svcmplt(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8))) +svbool_t svcmplt(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32))) +svbool_t svcmplt(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64))) +svbool_t svcmplt(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16))) +svbool_t svcmplt(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64))) +svbool_t svcmplt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32))) +svbool_t svcmplt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16))) +svbool_t svcmplt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8))) +svbool_t svcmplt_wide(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32))) +svbool_t svcmplt_wide(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16))) +svbool_t svcmplt_wide(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8))) +svbool_t svcmplt_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32))) +svbool_t 
svcmplt_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16))) +svbool_t svcmplt_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8))) +svbool_t svcmplt_wide(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32))) +svbool_t svcmplt_wide(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16))) +svbool_t svcmplt_wide(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8))) +svbool_t svcmplt_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32))) +svbool_t svcmplt_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16))) +svbool_t svcmplt_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64))) +svbool_t svcmpne(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32))) +svbool_t svcmpne(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16))) +svbool_t svcmpne(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8))) +svbool_t svcmpne(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32))) +svbool_t svcmpne(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64))) +svbool_t svcmpne(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16))) +svbool_t svcmpne(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8))) +svbool_t svcmpne(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32))) +svbool_t svcmpne(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64))) +svbool_t svcmpne(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16))) +svbool_t svcmpne(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8))) +svbool_t svcmpne(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32))) +svbool_t svcmpne(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64))) +svbool_t svcmpne(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16))) +svbool_t svcmpne(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8))) +svbool_t svcmpne(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32))) +svbool_t svcmpne(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64))) +svbool_t svcmpne(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16))) +svbool_t svcmpne(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64))) +svbool_t 
svcmpne(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32))) +svbool_t svcmpne(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16))) +svbool_t svcmpne(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8))) +svbool_t svcmpne_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32))) +svbool_t svcmpne_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16))) +svbool_t svcmpne_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8))) +svbool_t svcmpne_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32))) +svbool_t svcmpne_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16))) +svbool_t svcmpne_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64))) +svbool_t svcmpuo(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32))) +svbool_t svcmpuo(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16))) +svbool_t svcmpuo(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64))) +svbool_t svcmpuo(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32))) +svbool_t svcmpuo(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16))) +svbool_t svcmpuo(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m))) +svuint8_t svcnot_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m))) +svuint32_t svcnot_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m))) +svuint64_t svcnot_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m))) +svuint16_t svcnot_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m))) +svint8_t svcnot_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m))) +svint32_t svcnot_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m))) +svint64_t svcnot_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m))) +svint16_t svcnot_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x))) +svuint8_t svcnot_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x))) +svuint32_t svcnot_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x))) +svuint64_t svcnot_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x))) +svuint16_t svcnot_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x))) +svint8_t 
svcnot_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x))) +svint32_t svcnot_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x))) +svint64_t svcnot_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x))) +svint16_t svcnot_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z))) +svuint8_t svcnot_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z))) +svuint32_t svcnot_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z))) +svuint64_t svcnot_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z))) +svuint16_t svcnot_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z))) +svint8_t svcnot_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z))) +svint32_t svcnot_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z))) +svint64_t svcnot_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z))) +svint16_t svcnot_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m))) +svuint8_t svcnt_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m))) +svuint32_t svcnt_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m))) +svuint64_t svcnt_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m))) +svuint16_t svcnt_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m))) +svuint8_t svcnt_m(svuint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m))) +svuint64_t svcnt_m(svuint64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m))) +svuint32_t svcnt_m(svuint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m))) +svuint16_t svcnt_m(svuint16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m))) +svuint32_t svcnt_m(svuint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m))) +svuint64_t svcnt_m(svuint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m))) +svuint16_t svcnt_m(svuint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x))) +svuint8_t svcnt_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x))) +svuint32_t svcnt_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x))) +svuint64_t svcnt_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x))) +svuint16_t svcnt_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x))) +svuint8_t svcnt_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x))) +svuint64_t svcnt_x(svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x))) +svuint32_t svcnt_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x))) +svuint16_t svcnt_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x))) +svuint32_t svcnt_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x))) +svuint64_t svcnt_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x))) +svuint16_t svcnt_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z))) +svuint8_t svcnt_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z))) +svuint32_t svcnt_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z))) +svuint64_t svcnt_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z))) +svuint16_t svcnt_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z))) +svuint8_t svcnt_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z))) +svuint64_t svcnt_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z))) +svuint32_t svcnt_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z))) +svuint16_t svcnt_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z))) +svuint32_t svcnt_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z))) +svuint64_t svcnt_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z))) +svuint16_t svcnt_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8))) +svuint8x2_t svcreate2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32))) +svuint32x2_t svcreate2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64))) +svuint64x2_t svcreate2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16))) +svuint16x2_t svcreate2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8))) +svint8x2_t svcreate2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64))) +svfloat64x2_t svcreate2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32))) +svfloat32x2_t svcreate2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16))) +svfloat16x2_t svcreate2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32))) +svint32x2_t svcreate2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64))) +svint64x2_t svcreate2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16))) +svint16x2_t svcreate2(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8))) +svuint8x3_t svcreate3(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32))) +svuint32x3_t svcreate3(svuint32_t, 
svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64))) +svuint64x3_t svcreate3(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16))) +svuint16x3_t svcreate3(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8))) +svint8x3_t svcreate3(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64))) +svfloat64x3_t svcreate3(svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32))) +svfloat32x3_t svcreate3(svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16))) +svfloat16x3_t svcreate3(svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32))) +svint32x3_t svcreate3(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64))) +svint64x3_t svcreate3(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16))) +svint16x3_t svcreate3(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8))) +svuint8x4_t svcreate4(svuint8_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32))) +svuint32x4_t svcreate4(svuint32_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64))) +svuint64x4_t svcreate4(svuint64_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16))) +svuint16x4_t svcreate4(svuint16_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8))) +svint8x4_t svcreate4(svint8_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64))) +svfloat64x4_t svcreate4(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32))) +svfloat32x4_t svcreate4(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16))) +svfloat16x4_t svcreate4(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32))) +svint32x4_t svcreate4(svint32_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64))) +svint64x4_t svcreate4(svint64_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16))) +svint16x4_t svcreate4(svint16_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x))) +svfloat16_t svcvt_f16_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z))) +svfloat16_t svcvt_f16_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x))) +svfloat16_t svcvt_f16_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z))) +svfloat16_t svcvt_f16_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x))) +svfloat16_t svcvt_f16_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z))) +svfloat16_t svcvt_f16_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x))) +svfloat16_t svcvt_f16_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z))) +svfloat16_t svcvt_f16_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x))) +svfloat16_t svcvt_f16_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z))) +svfloat16_t svcvt_f16_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x))) +svfloat16_t svcvt_f16_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z))) +svfloat16_t svcvt_f16_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x))) +svfloat16_t svcvt_f16_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z))) +svfloat16_t svcvt_f16_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x))) +svfloat16_t svcvt_f16_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z))) +svfloat16_t svcvt_f16_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x))) +svfloat32_t svcvt_f32_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z))) +svfloat32_t svcvt_f32_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x))) +svfloat32_t svcvt_f32_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z))) +svfloat32_t svcvt_f32_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m))) +svfloat32_t 
svcvt_f32_m(svfloat32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x))) +svfloat32_t svcvt_f32_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z))) +svfloat32_t svcvt_f32_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x))) +svfloat32_t svcvt_f32_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z))) +svfloat32_t svcvt_f32_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x))) +svfloat32_t svcvt_f32_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z))) +svfloat32_t svcvt_f32_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x))) +svfloat32_t svcvt_f32_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z))) +svfloat32_t svcvt_f32_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x))) +svfloat64_t svcvt_f64_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z))) +svfloat64_t svcvt_f64_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x))) +svfloat64_t svcvt_f64_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z))) +svfloat64_t svcvt_f64_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x))) +svfloat64_t svcvt_f64_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z))) +svfloat64_t svcvt_f64_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x))) +svfloat64_t svcvt_f64_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z))) +svfloat64_t svcvt_f64_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x))) +svfloat64_t svcvt_f64_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z))) +svfloat64_t svcvt_f64_z(svbool_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x))) +svfloat64_t svcvt_f64_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z))) +svfloat64_t svcvt_f64_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m))) +svint16_t svcvt_s16_m(svint16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x))) +svint16_t svcvt_s16_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z))) +svint16_t svcvt_s16_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m))) +svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x))) +svint32_t svcvt_s32_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z))) +svint32_t svcvt_s32_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m))) +svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x))) +svint32_t svcvt_s32_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z))) +svint32_t svcvt_s32_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m))) +svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x))) +svint32_t svcvt_s32_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z))) +svint32_t svcvt_s32_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m))) +svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x))) +svint64_t svcvt_s64_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z))) +svint64_t svcvt_s64_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m))) +svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x))) +svint64_t svcvt_s64_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z))) +svint64_t svcvt_s64_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m))) +svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x))) +svint64_t svcvt_s64_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z))) +svint64_t svcvt_s64_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m))) +svuint16_t svcvt_u16_m(svuint16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x))) +svuint16_t svcvt_u16_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z))) +svuint16_t svcvt_u16_z(svbool_t, 
svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m))) +svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x))) +svuint32_t svcvt_u32_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z))) +svuint32_t svcvt_u32_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m))) +svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x))) +svuint32_t svcvt_u32_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z))) +svuint32_t svcvt_u32_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m))) +svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x))) +svuint32_t svcvt_u32_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z))) +svuint32_t svcvt_u32_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m))) +svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x))) +svuint64_t svcvt_u64_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z))) +svuint64_t svcvt_u64_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m))) +svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x))) +svuint64_t svcvt_u64_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z))) +svuint64_t svcvt_u64_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m))) +svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x))) +svuint64_t svcvt_u64_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z))) +svuint64_t svcvt_u64_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m))) +svfloat64_t svdiv_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m))) +svfloat32_t svdiv_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m))) +svfloat16_t svdiv_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x))) +svfloat64_t svdiv_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x))) +svfloat32_t svdiv_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x))) +svfloat16_t svdiv_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z))) +svfloat64_t svdiv_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z))) +svfloat32_t svdiv_z(svbool_t, svfloat32_t, float32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z))) +svfloat16_t svdiv_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m))) +svint32_t svdiv_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m))) +svint64_t svdiv_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x))) +svint32_t svdiv_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x))) +svint64_t svdiv_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z))) +svint32_t svdiv_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z))) +svint64_t svdiv_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m))) +svuint32_t svdiv_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m))) +svuint64_t svdiv_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x))) +svuint32_t svdiv_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x))) +svuint64_t svdiv_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z))) +svuint32_t svdiv_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z))) +svuint64_t svdiv_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m))) +svfloat64_t svdiv_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m))) +svfloat32_t svdiv_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m))) +svfloat16_t svdiv_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x))) +svfloat64_t svdiv_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x))) +svfloat32_t svdiv_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x))) +svfloat16_t svdiv_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z))) +svfloat64_t svdiv_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z))) +svfloat32_t svdiv_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z))) +svfloat16_t svdiv_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m))) +svint32_t svdiv_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m))) +svint64_t svdiv_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x))) +svint32_t svdiv_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x))) +svint64_t svdiv_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z))) +svint32_t svdiv_z(svbool_t, svint32_t, svint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z))) +svint64_t svdiv_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m))) +svuint32_t svdiv_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m))) +svuint64_t svdiv_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x))) +svuint32_t svdiv_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x))) +svuint64_t svdiv_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z))) +svuint32_t svdiv_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z))) +svuint64_t svdiv_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m))) +svfloat64_t svdivr_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m))) +svfloat32_t svdivr_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m))) +svfloat16_t svdivr_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x))) +svfloat64_t svdivr_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x))) +svfloat32_t svdivr_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x))) +svfloat16_t svdivr_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z))) +svfloat64_t svdivr_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z))) +svfloat32_t svdivr_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z))) +svfloat16_t svdivr_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m))) +svint32_t svdivr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m))) +svint64_t svdivr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x))) +svint32_t svdivr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x))) +svint64_t svdivr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z))) +svint32_t svdivr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z))) +svint64_t svdivr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m))) +svuint32_t svdivr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m))) +svuint64_t svdivr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x))) +svuint32_t svdivr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x))) +svuint64_t svdivr_x(svbool_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z))) +svuint32_t svdivr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z))) +svuint64_t svdivr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m))) +svfloat64_t svdivr_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m))) +svfloat32_t svdivr_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m))) +svfloat16_t svdivr_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x))) +svfloat64_t svdivr_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x))) +svfloat32_t svdivr_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x))) +svfloat16_t svdivr_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z))) +svfloat64_t svdivr_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z))) +svfloat32_t svdivr_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z))) +svfloat16_t svdivr_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m))) +svint32_t svdivr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m))) +svint64_t svdivr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x))) +svint32_t svdivr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x))) +svint64_t svdivr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z))) +svint32_t svdivr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z))) +svint64_t svdivr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m))) +svuint32_t svdivr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m))) +svuint64_t svdivr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x))) +svuint32_t svdivr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x))) +svuint64_t svdivr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z))) +svuint32_t svdivr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z))) +svuint64_t svdivr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32))) +svint32_t svdot(svint32_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64))) +svint64_t svdot(svint64_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32))) +svuint32_t svdot(svuint32_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64))) +svuint64_t 
svdot(svuint64_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32))) +svint32_t svdot(svint32_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64))) +svint64_t svdot(svint64_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32))) +svuint32_t svdot(svuint32_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64))) +svuint64_t svdot(svuint64_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32))) +svint32_t svdot_lane(svint32_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64))) +svint64_t svdot_lane(svint64_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32))) +svuint32_t svdot_lane(svuint32_t, svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64))) +svuint64_t svdot_lane(svuint64_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8))) +svuint8_t svdup_u8(uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32))) +svuint32_t svdup_u32(uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64))) +svuint64_t svdup_u64(uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16))) +svuint16_t svdup_u16(uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8))) +svint8_t svdup_s8(int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64))) +svfloat64_t svdup_f64(float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32))) +svfloat32_t svdup_f32(float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16))) +svfloat16_t svdup_f16(float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32))) +svint32_t svdup_s32(int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64))) +svint64_t svdup_s64(int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16))) +svint16_t svdup_s16(int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m))) +svuint8_t svdup_u8_m(svuint8_t, svbool_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m))) +svuint32_t svdup_u32_m(svuint32_t, svbool_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m))) +svuint64_t svdup_u64_m(svuint64_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m))) +svuint16_t svdup_u16_m(svuint16_t, svbool_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m))) +svint8_t svdup_s8_m(svint8_t, svbool_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m))) +svfloat64_t svdup_f64_m(svfloat64_t, svbool_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m))) +svfloat32_t svdup_f32_m(svfloat32_t, svbool_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m))) +svfloat16_t svdup_f16_m(svfloat16_t, svbool_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m))) +svint32_t svdup_s32_m(svint32_t, svbool_t, int32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m))) +svint64_t svdup_s64_m(svint64_t, svbool_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m))) +svint16_t svdup_s16_m(svint16_t, svbool_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8))) +svbool_t svdup_b8(bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32))) +svbool_t svdup_b32(bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64))) +svbool_t svdup_b64(bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16))) +svbool_t svdup_b16(bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x))) +svuint8_t svdup_u8_x(svbool_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x))) +svuint32_t svdup_u32_x(svbool_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x))) +svuint64_t svdup_u64_x(svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x))) +svuint16_t svdup_u16_x(svbool_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x))) +svint8_t svdup_s8_x(svbool_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x))) +svfloat64_t svdup_f64_x(svbool_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x))) +svfloat32_t svdup_f32_x(svbool_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x))) +svfloat16_t svdup_f16_x(svbool_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x))) +svint32_t svdup_s32_x(svbool_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x))) +svint64_t svdup_s64_x(svbool_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x))) +svint16_t svdup_s16_x(svbool_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z))) +svuint8_t svdup_u8_z(svbool_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z))) +svuint32_t svdup_u32_z(svbool_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z))) +svuint64_t svdup_u64_z(svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z))) +svuint16_t svdup_u16_z(svbool_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z))) +svint8_t svdup_s8_z(svbool_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z))) +svfloat64_t svdup_f64_z(svbool_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z))) +svfloat32_t svdup_f32_z(svbool_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z))) +svfloat16_t svdup_f16_z(svbool_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z))) +svint32_t svdup_s32_z(svbool_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z))) +svint64_t svdup_s64_z(svbool_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z))) +svint16_t svdup_s16_z(svbool_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8))) +svuint8_t svdup_lane(svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32))) +svuint32_t svdup_lane(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64))) +svuint64_t svdup_lane(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16))) +svuint16_t svdup_lane(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8))) +svint8_t svdup_lane(svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64))) +svfloat64_t svdup_lane(svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32))) +svfloat32_t svdup_lane(svfloat32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16))) +svfloat16_t svdup_lane(svfloat16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32))) +svint32_t svdup_lane(svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64))) +svint64_t svdup_lane(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16))) +svint16_t svdup_lane(svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8))) +svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8))) +svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16))) +svuint16_t svdupq_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16))) +svfloat16_t svdupq_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16))) +svint16_t svdupq_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32))) +svuint32_t svdupq_u32(uint32_t, uint32_t, uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32))) +svfloat32_t svdupq_f32(float32_t, float32_t, float32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32))) +svint32_t svdupq_s32(int32_t, int32_t, int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64))) +svuint64_t svdupq_u64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64))) +svfloat64_t svdupq_f64(float64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64))) +svint64_t svdupq_s64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8))) +svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16))) +svbool_t svdupq_b16(bool, bool, bool, bool, bool, bool, bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32))) +svbool_t svdupq_b32(bool, bool, bool, bool); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64))) +svbool_t svdupq_b64(bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8))) +svuint8_t svdupq_lane(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32))) +svuint32_t svdupq_lane(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64))) +svuint64_t svdupq_lane(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16))) +svuint16_t svdupq_lane(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8))) +svint8_t svdupq_lane(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64))) +svfloat64_t svdupq_lane(svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32))) +svfloat32_t svdupq_lane(svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16))) +svfloat16_t svdupq_lane(svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32))) +svint32_t svdupq_lane(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64))) +svint64_t svdupq_lane(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16))) +svint16_t svdupq_lane(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z))) +svbool_t sveor_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m))) +svuint8_t sveor_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m))) +svuint32_t sveor_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m))) +svuint64_t sveor_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m))) +svuint16_t sveor_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m))) +svint8_t sveor_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m))) +svint32_t sveor_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m))) +svint64_t sveor_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m))) +svint16_t sveor_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x))) +svuint8_t sveor_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x))) +svuint32_t sveor_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x))) +svuint64_t sveor_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x))) +svuint16_t sveor_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x))) +svint8_t sveor_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x))) +svint32_t sveor_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x))) +svint64_t sveor_x(svbool_t, 
svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x))) +svint16_t sveor_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z))) +svuint8_t sveor_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z))) +svuint32_t sveor_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z))) +svuint64_t sveor_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z))) +svuint16_t sveor_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z))) +svint8_t sveor_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z))) +svint32_t sveor_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z))) +svint64_t sveor_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z))) +svint16_t sveor_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m))) +svuint8_t sveor_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m))) +svuint32_t sveor_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m))) +svuint64_t sveor_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m))) +svuint16_t sveor_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m))) +svint8_t sveor_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m))) +svint32_t sveor_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m))) +svint64_t sveor_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m))) +svint16_t sveor_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x))) +svuint8_t sveor_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x))) +svuint32_t sveor_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x))) +svuint64_t sveor_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x))) +svuint16_t sveor_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x))) +svint8_t sveor_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x))) +svint32_t sveor_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x))) +svint64_t sveor_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x))) +svint16_t sveor_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z))) +svuint8_t sveor_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z))) +svuint32_t sveor_z(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z))) +svuint64_t sveor_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z))) +svuint16_t sveor_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z))) +svint8_t sveor_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z))) +svint32_t sveor_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z))) +svint64_t sveor_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z))) +svint16_t sveor_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8))) +uint8_t sveorv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32))) +uint32_t sveorv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64))) +uint64_t sveorv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16))) +uint16_t sveorv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8))) +int8_t sveorv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32))) +int32_t sveorv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64))) +int64_t sveorv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16))) +int16_t sveorv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8))) +svuint8_t svext(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32))) +svuint32_t svext(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64))) +svuint64_t svext(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16))) +svuint16_t svext(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8))) +svint8_t svext(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64))) +svfloat64_t svext(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32))) +svfloat32_t svext(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16))) +svfloat16_t svext(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32))) +svint32_t svext(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64))) +svint64_t svext(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16))) +svint16_t svext(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m))) +svint32_t svextb_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m))) +svint64_t svextb_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m))) +svint16_t svextb_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x))) 
+svint32_t svextb_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x))) +svint64_t svextb_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x))) +svint16_t svextb_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z))) +svint32_t svextb_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z))) +svint64_t svextb_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z))) +svint16_t svextb_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m))) +svuint32_t svextb_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m))) +svuint64_t svextb_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m))) +svuint16_t svextb_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x))) +svuint32_t svextb_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x))) +svuint64_t svextb_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x))) +svuint16_t svextb_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z))) +svuint32_t svextb_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z))) +svuint64_t svextb_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z))) +svuint16_t svextb_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m))) +svint32_t svexth_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m))) +svint64_t svexth_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x))) +svint32_t svexth_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x))) +svint64_t svexth_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z))) +svint32_t svexth_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z))) +svint64_t svexth_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m))) +svuint32_t svexth_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m))) +svuint64_t svexth_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x))) +svuint32_t svexth_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x))) +svuint64_t svexth_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z))) +svuint32_t svexth_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z))) +svuint64_t svexth_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m))) +svint64_t svextw_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x))) +svint64_t svextw_x(svbool_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z))) +svint64_t svextw_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m))) +svuint64_t svextw_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x))) +svuint64_t svextw_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z))) +svuint64_t svextw_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8))) +svuint8_t svget2(svuint8x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32))) +svuint32_t svget2(svuint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64))) +svuint64_t svget2(svuint64x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16))) +svuint16_t svget2(svuint16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8))) +svint8_t svget2(svint8x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64))) +svfloat64_t svget2(svfloat64x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32))) +svfloat32_t svget2(svfloat32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16))) +svfloat16_t svget2(svfloat16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32))) +svint32_t svget2(svint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64))) +svint64_t svget2(svint64x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16))) +svint16_t svget2(svint16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8))) +svuint8_t svget3(svuint8x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32))) +svuint32_t svget3(svuint32x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64))) +svuint64_t svget3(svuint64x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16))) +svuint16_t svget3(svuint16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8))) +svint8_t svget3(svint8x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64))) +svfloat64_t svget3(svfloat64x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32))) +svfloat32_t svget3(svfloat32x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16))) +svfloat16_t svget3(svfloat16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32))) +svint32_t svget3(svint32x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64))) +svint64_t svget3(svint64x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16))) +svint16_t svget3(svint16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8))) +svuint8_t svget4(svuint8x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32))) +svuint32_t svget4(svuint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64))) +svuint64_t svget4(svuint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16))) 
+svuint16_t svget4(svuint16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8))) +svint8_t svget4(svint8x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64))) +svfloat64_t svget4(svfloat64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32))) +svfloat32_t svget4(svfloat32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16))) +svfloat16_t svget4(svfloat16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32))) +svint32_t svget4(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64))) +svint64_t svget4(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16))) +svint16_t svget4(svint16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8))) +svuint8_t svinsr(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32))) +svuint32_t svinsr(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64))) +svuint64_t svinsr(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16))) +svuint16_t svinsr(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8))) +svint8_t svinsr(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64))) +svfloat64_t svinsr(svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32))) +svfloat32_t svinsr(svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16))) +svfloat16_t svinsr(svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32))) +svint32_t svinsr(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64))) +svint64_t svinsr(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16))) +svint16_t svinsr(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8))) +uint8_t svlasta(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32))) +uint32_t svlasta(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64))) +uint64_t svlasta(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16))) +uint16_t svlasta(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8))) +int8_t svlasta(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64))) +float64_t svlasta(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32))) +float32_t svlasta(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16))) +float16_t svlasta(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32))) +int32_t svlasta(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64))) +int64_t svlasta(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16))) +int16_t svlasta(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8))) 
+uint8_t svlastb(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32))) +uint32_t svlastb(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64))) +uint64_t svlastb(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16))) +uint16_t svlastb(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8))) +int8_t svlastb(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64))) +float64_t svlastb(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32))) +float32_t svlastb(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16))) +float16_t svlastb(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32))) +int32_t svlastb(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64))) +int64_t svlastb(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16))) +int16_t svlastb(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8))) +svuint8_t svld1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32))) +svuint32_t svld1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64))) +svuint64_t svld1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16))) +svuint16_t svld1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8))) +svint8_t svld1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64))) +svfloat64_t svld1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32))) +svfloat32_t svld1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16))) +svfloat16_t svld1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32))) +svint32_t svld1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64))) +svint64_t svld1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16))) +svint16_t svld1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8))) +svuint8_t svld1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32))) +svuint32_t svld1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64))) +svuint64_t svld1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16))) +svuint16_t svld1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8))) +svint8_t svld1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64))) +svfloat64_t svld1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32))) +svfloat32_t svld1_vnum(svbool_t, float32_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16))) +svfloat16_t svld1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32))) +svint32_t svld1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64))) +svint64_t svld1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16))) +svint16_t svld1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8))) +svuint8_t svld1rq(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32))) +svuint32_t svld1rq(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64))) +svuint64_t svld1rq(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16))) +svuint16_t svld1rq(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8))) +svint8_t svld1rq(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64))) +svfloat64_t svld1rq(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32))) +svfloat32_t svld1rq(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16))) +svfloat16_t svld1rq(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32))) +svint32_t svld1rq(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64))) +svint64_t svld1rq(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16))) +svint16_t svld1rq(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8))) +svuint8x2_t svld2(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32))) +svuint32x2_t svld2(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64))) +svuint64x2_t svld2(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16))) +svuint16x2_t svld2(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8))) +svint8x2_t svld2(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64))) +svfloat64x2_t svld2(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32))) +svfloat32x2_t svld2(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16))) +svfloat16x2_t svld2(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32))) +svint32x2_t svld2(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64))) +svint64x2_t svld2(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16))) +svint16x2_t svld2(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8))) +svuint8x2_t svld2_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32))) +svuint32x2_t svld2_vnum(svbool_t, uint32_t 
const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64))) +svuint64x2_t svld2_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16))) +svuint16x2_t svld2_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8))) +svint8x2_t svld2_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64))) +svfloat64x2_t svld2_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32))) +svfloat32x2_t svld2_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16))) +svfloat16x2_t svld2_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32))) +svint32x2_t svld2_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64))) +svint64x2_t svld2_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16))) +svint16x2_t svld2_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8))) +svuint8x3_t svld3(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32))) +svuint32x3_t svld3(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64))) +svuint64x3_t svld3(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16))) +svuint16x3_t svld3(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8))) +svint8x3_t svld3(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64))) +svfloat64x3_t svld3(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32))) +svfloat32x3_t svld3(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16))) +svfloat16x3_t svld3(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32))) +svint32x3_t svld3(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64))) +svint64x3_t svld3(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16))) +svint16x3_t svld3(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8))) +svuint8x3_t svld3_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32))) +svuint32x3_t svld3_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64))) +svuint64x3_t svld3_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16))) +svuint16x3_t svld3_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8))) +svint8x3_t svld3_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64))) +svfloat64x3_t svld3_vnum(svbool_t, float64_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32))) +svfloat32x3_t svld3_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16))) +svfloat16x3_t svld3_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32))) +svint32x3_t svld3_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64))) +svint64x3_t svld3_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16))) +svint16x3_t svld3_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8))) +svuint8x4_t svld4(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32))) +svuint32x4_t svld4(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64))) +svuint64x4_t svld4(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16))) +svuint16x4_t svld4(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8))) +svint8x4_t svld4(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64))) +svfloat64x4_t svld4(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32))) +svfloat32x4_t svld4(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16))) +svfloat16x4_t svld4(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32))) +svint32x4_t svld4(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64))) +svint64x4_t svld4(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16))) +svint16x4_t svld4(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8))) +svuint8x4_t svld4_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32))) +svuint32x4_t svld4_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64))) +svuint64x4_t svld4_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16))) +svuint16x4_t svld4_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8))) +svint8x4_t svld4_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64))) +svfloat64x4_t svld4_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32))) +svfloat32x4_t svld4_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16))) +svfloat16x4_t svld4_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32))) +svint32x4_t svld4_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64))) +svint64x4_t svld4_vnum(svbool_t, int64_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16))) +svint16x4_t svld4_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8))) +svuint8_t svldnt1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32))) +svuint32_t svldnt1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64))) +svuint64_t svldnt1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16))) +svuint16_t svldnt1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8))) +svint8_t svldnt1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64))) +svfloat64_t svldnt1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32))) +svfloat32_t svldnt1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16))) +svfloat16_t svldnt1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32))) +svint32_t svldnt1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64))) +svint64_t svldnt1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16))) +svint16_t svldnt1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8))) +svuint8_t svldnt1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32))) +svuint32_t svldnt1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64))) +svuint64_t svldnt1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16))) +svuint16_t svldnt1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8))) +svint8_t svldnt1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64))) +svfloat64_t svldnt1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32))) +svfloat32_t svldnt1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16))) +svfloat16_t svldnt1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32))) +svint32_t svldnt1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64))) +svint64_t svldnt1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16))) +svint16_t svldnt1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8))) +uint64_t svlen(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32))) +uint64_t svlen(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64))) +uint64_t svlen(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16))) +uint64_t svlen(svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8))) +uint64_t svlen(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64))) +uint64_t svlen(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32))) +uint64_t svlen(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16))) +uint64_t svlen(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32))) +uint64_t svlen(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64))) +uint64_t svlen(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16))) +uint64_t svlen(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m))) +svuint8_t svlsl_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m))) +svuint32_t svlsl_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m))) +svuint64_t svlsl_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m))) +svuint16_t svlsl_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m))) +svint8_t svlsl_m(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m))) +svint32_t svlsl_m(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m))) +svint64_t svlsl_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m))) +svint16_t svlsl_m(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x))) +svuint8_t svlsl_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x))) +svuint32_t svlsl_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x))) +svuint64_t svlsl_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x))) +svuint16_t svlsl_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x))) +svint8_t svlsl_x(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x))) +svint32_t svlsl_x(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x))) +svint64_t svlsl_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x))) +svint16_t svlsl_x(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z))) +svuint8_t svlsl_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z))) +svuint32_t svlsl_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z))) +svuint64_t svlsl_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z))) +svuint16_t svlsl_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z))) +svint8_t svlsl_z(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z))) +svint32_t 
svlsl_z(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z))) +svint64_t svlsl_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z))) +svint16_t svlsl_z(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m))) +svuint8_t svlsl_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m))) +svuint32_t svlsl_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m))) +svuint64_t svlsl_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m))) +svuint16_t svlsl_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m))) +svint8_t svlsl_m(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m))) +svint32_t svlsl_m(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m))) +svint64_t svlsl_m(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m))) +svint16_t svlsl_m(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x))) +svuint8_t svlsl_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x))) +svuint32_t svlsl_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x))) +svuint64_t svlsl_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x))) +svuint16_t svlsl_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x))) +svint8_t svlsl_x(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x))) +svint32_t svlsl_x(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x))) +svint64_t svlsl_x(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x))) +svint16_t svlsl_x(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z))) +svuint8_t svlsl_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z))) +svuint32_t svlsl_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z))) +svuint64_t svlsl_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z))) +svuint16_t svlsl_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z))) +svint8_t svlsl_z(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z))) +svint32_t svlsl_z(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z))) +svint64_t svlsl_z(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z))) +svint16_t svlsl_z(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m))) +svuint8_t svlsl_wide_m(svbool_t, svuint8_t, 
uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m))) +svuint32_t svlsl_wide_m(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m))) +svuint16_t svlsl_wide_m(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m))) +svint8_t svlsl_wide_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m))) +svint32_t svlsl_wide_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m))) +svint16_t svlsl_wide_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x))) +svuint8_t svlsl_wide_x(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x))) +svuint32_t svlsl_wide_x(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x))) +svuint16_t svlsl_wide_x(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x))) +svint8_t svlsl_wide_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x))) +svint32_t svlsl_wide_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x))) +svint16_t svlsl_wide_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z))) +svuint8_t svlsl_wide_z(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z))) +svuint32_t svlsl_wide_z(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z))) +svuint16_t svlsl_wide_z(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z))) +svint8_t svlsl_wide_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z))) +svint32_t svlsl_wide_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z))) +svint16_t svlsl_wide_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m))) +svuint8_t svlsl_wide_m(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m))) +svuint32_t svlsl_wide_m(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m))) +svuint16_t svlsl_wide_m(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m))) +svint8_t svlsl_wide_m(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m))) +svint32_t svlsl_wide_m(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m))) +svint16_t svlsl_wide_m(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x))) +svuint8_t svlsl_wide_x(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x))) +svuint32_t svlsl_wide_x(svbool_t, svuint32_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x))) +svuint16_t svlsl_wide_x(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x))) +svint8_t svlsl_wide_x(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x))) +svint32_t svlsl_wide_x(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x))) +svint16_t svlsl_wide_x(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z))) +svuint8_t svlsl_wide_z(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z))) +svuint32_t svlsl_wide_z(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z))) +svuint16_t svlsl_wide_z(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z))) +svint8_t svlsl_wide_z(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z))) +svint32_t svlsl_wide_z(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z))) +svint16_t svlsl_wide_z(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m))) +svuint8_t svlsr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m))) +svuint32_t svlsr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m))) +svuint64_t svlsr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m))) +svuint16_t svlsr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x))) +svuint8_t svlsr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x))) +svuint32_t svlsr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x))) +svuint64_t svlsr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x))) +svuint16_t svlsr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z))) +svuint8_t svlsr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z))) +svuint32_t svlsr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z))) +svuint64_t svlsr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z))) +svuint16_t svlsr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m))) +svuint8_t svlsr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m))) +svuint32_t svlsr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m))) +svuint64_t svlsr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m))) +svuint16_t svlsr_m(svbool_t, svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x))) +svuint8_t svlsr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x))) +svuint32_t svlsr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x))) +svuint64_t svlsr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x))) +svuint16_t svlsr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z))) +svuint8_t svlsr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z))) +svuint32_t svlsr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z))) +svuint64_t svlsr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z))) +svuint16_t svlsr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m))) +svuint8_t svlsr_wide_m(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m))) +svuint32_t svlsr_wide_m(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m))) +svuint16_t svlsr_wide_m(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x))) +svuint8_t svlsr_wide_x(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x))) +svuint32_t svlsr_wide_x(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x))) +svuint16_t svlsr_wide_x(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z))) +svuint8_t svlsr_wide_z(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z))) +svuint32_t svlsr_wide_z(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z))) +svuint16_t svlsr_wide_z(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m))) +svuint8_t svlsr_wide_m(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m))) +svuint32_t svlsr_wide_m(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m))) +svuint16_t svlsr_wide_m(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x))) +svuint8_t svlsr_wide_x(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x))) +svuint32_t svlsr_wide_x(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x))) +svuint16_t svlsr_wide_x(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z))) +svuint8_t svlsr_wide_z(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z))) +svuint32_t svlsr_wide_z(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z))) +svuint16_t svlsr_wide_z(svbool_t, 
svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m))) +svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m))) +svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m))) +svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x))) +svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x))) +svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x))) +svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z))) +svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z))) +svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z))) +svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m))) +svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m))) +svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m))) +svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m))) +svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m))) +svint8_t svmad_m(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m))) +svint32_t svmad_m(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m))) +svint64_t svmad_m(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m))) +svint16_t svmad_m(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x))) +svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x))) +svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x))) +svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x))) +svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x))) +svint8_t svmad_x(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x))) +svint32_t svmad_x(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x))) +svint64_t svmad_x(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x))) 
+svint16_t svmad_x(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z))) +svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z))) +svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z))) +svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z))) +svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z))) +svint8_t svmad_z(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z))) +svint32_t svmad_z(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z))) +svint64_t svmad_z(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z))) +svint16_t svmad_z(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m))) +svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m))) +svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m))) +svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x))) +svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x))) +svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x))) +svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z))) +svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z))) +svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z))) +svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m))) +svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m))) +svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m))) +svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m))) +svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m))) +svint8_t svmad_m(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m))) +svint32_t svmad_m(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m))) +svint64_t svmad_m(svbool_t, svint64_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m))) +svint16_t svmad_m(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x))) +svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x))) +svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x))) +svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x))) +svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x))) +svint8_t svmad_x(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x))) +svint32_t svmad_x(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x))) +svint64_t svmad_x(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x))) +svint16_t svmad_x(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z))) +svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z))) +svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z))) +svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z))) +svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z))) +svint8_t svmad_z(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z))) +svint32_t svmad_z(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z))) +svint64_t svmad_z(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z))) +svint16_t svmad_z(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m))) +svfloat64_t svmax_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m))) +svfloat32_t svmax_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m))) +svfloat16_t svmax_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x))) +svfloat64_t svmax_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x))) +svfloat32_t svmax_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x))) +svfloat16_t svmax_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z))) +svfloat64_t svmax_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z))) +svfloat32_t svmax_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z))) 
+svfloat16_t svmax_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m))) +svint8_t svmax_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m))) +svint32_t svmax_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m))) +svint64_t svmax_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m))) +svint16_t svmax_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x))) +svint8_t svmax_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x))) +svint32_t svmax_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x))) +svint64_t svmax_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x))) +svint16_t svmax_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z))) +svint8_t svmax_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z))) +svint32_t svmax_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z))) +svint64_t svmax_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z))) +svint16_t svmax_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m))) +svuint8_t svmax_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m))) +svuint32_t svmax_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m))) +svuint64_t svmax_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m))) +svuint16_t svmax_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x))) +svuint8_t svmax_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x))) +svuint32_t svmax_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x))) +svuint64_t svmax_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x))) +svuint16_t svmax_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z))) +svuint8_t svmax_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z))) +svuint32_t svmax_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z))) +svuint64_t svmax_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z))) +svuint16_t svmax_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m))) +svfloat64_t svmax_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m))) +svfloat32_t svmax_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m))) +svfloat16_t svmax_m(svbool_t, svfloat16_t, 
svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x))) +svfloat64_t svmax_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x))) +svfloat32_t svmax_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x))) +svfloat16_t svmax_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z))) +svfloat64_t svmax_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z))) +svfloat32_t svmax_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z))) +svfloat16_t svmax_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m))) +svint8_t svmax_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m))) +svint32_t svmax_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m))) +svint64_t svmax_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m))) +svint16_t svmax_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x))) +svint8_t svmax_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x))) +svint32_t svmax_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x))) +svint64_t svmax_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x))) +svint16_t svmax_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z))) +svint8_t svmax_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z))) +svint32_t svmax_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z))) +svint64_t svmax_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z))) +svint16_t svmax_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m))) +svuint8_t svmax_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m))) +svuint32_t svmax_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m))) +svuint64_t svmax_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m))) +svuint16_t svmax_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x))) +svuint8_t svmax_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x))) +svuint32_t svmax_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x))) +svuint64_t svmax_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x))) +svuint16_t svmax_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z))) +svuint8_t svmax_z(svbool_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z))) +svuint32_t svmax_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z))) +svuint64_t svmax_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z))) +svuint16_t svmax_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m))) +svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m))) +svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m))) +svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x))) +svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x))) +svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x))) +svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z))) +svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z))) +svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z))) +svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m))) +svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m))) +svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m))) +svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x))) +svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x))) +svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x))) +svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z))) +svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z))) +svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z))) +svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64))) +float64_t svmaxnmv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32))) +float32_t svmaxnmv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16))) +float16_t svmaxnmv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64))) +float64_t svmaxv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32))) +float32_t svmaxv(svbool_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16))) +float16_t svmaxv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8))) +int8_t svmaxv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32))) +int32_t svmaxv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64))) +int64_t svmaxv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16))) +int16_t svmaxv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8))) +uint8_t svmaxv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32))) +uint32_t svmaxv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64))) +uint64_t svmaxv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16))) +uint16_t svmaxv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m))) +svfloat64_t svmin_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m))) +svfloat32_t svmin_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m))) +svfloat16_t svmin_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x))) +svfloat64_t svmin_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x))) +svfloat32_t svmin_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x))) +svfloat16_t svmin_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z))) +svfloat64_t svmin_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z))) +svfloat32_t svmin_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z))) +svfloat16_t svmin_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m))) +svint8_t svmin_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m))) +svint32_t svmin_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m))) +svint64_t svmin_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m))) +svint16_t svmin_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x))) +svint8_t svmin_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x))) +svint32_t svmin_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x))) +svint64_t svmin_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x))) +svint16_t svmin_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z))) +svint8_t svmin_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z))) +svint32_t svmin_z(svbool_t, svint32_t, int32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z))) +svint64_t svmin_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z))) +svint16_t svmin_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m))) +svuint8_t svmin_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m))) +svuint32_t svmin_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m))) +svuint64_t svmin_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m))) +svuint16_t svmin_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x))) +svuint8_t svmin_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x))) +svuint32_t svmin_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x))) +svuint64_t svmin_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x))) +svuint16_t svmin_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z))) +svuint8_t svmin_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z))) +svuint32_t svmin_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z))) +svuint64_t svmin_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z))) +svuint16_t svmin_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m))) +svfloat64_t svmin_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m))) +svfloat32_t svmin_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m))) +svfloat16_t svmin_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x))) +svfloat64_t svmin_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x))) +svfloat32_t svmin_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x))) +svfloat16_t svmin_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z))) +svfloat64_t svmin_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z))) +svfloat32_t svmin_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z))) +svfloat16_t svmin_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m))) +svint8_t svmin_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m))) +svint32_t svmin_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m))) +svint64_t svmin_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m))) +svint16_t svmin_m(svbool_t, svint16_t, svint16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x))) +svint8_t svmin_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x))) +svint32_t svmin_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x))) +svint64_t svmin_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x))) +svint16_t svmin_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z))) +svint8_t svmin_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z))) +svint32_t svmin_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z))) +svint64_t svmin_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z))) +svint16_t svmin_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m))) +svuint8_t svmin_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m))) +svuint32_t svmin_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m))) +svuint64_t svmin_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m))) +svuint16_t svmin_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x))) +svuint8_t svmin_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x))) +svuint32_t svmin_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x))) +svuint64_t svmin_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x))) +svuint16_t svmin_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z))) +svuint8_t svmin_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z))) +svuint32_t svmin_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z))) +svuint64_t svmin_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z))) +svuint16_t svmin_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m))) +svfloat64_t svminnm_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m))) +svfloat32_t svminnm_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m))) +svfloat16_t svminnm_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x))) +svfloat64_t svminnm_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x))) +svfloat32_t svminnm_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x))) +svfloat16_t svminnm_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z))) +svfloat64_t svminnm_z(svbool_t, 
svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z))) +svfloat32_t svminnm_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z))) +svfloat16_t svminnm_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m))) +svfloat64_t svminnm_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m))) +svfloat32_t svminnm_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m))) +svfloat16_t svminnm_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x))) +svfloat64_t svminnm_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x))) +svfloat32_t svminnm_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x))) +svfloat16_t svminnm_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z))) +svfloat64_t svminnm_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z))) +svfloat32_t svminnm_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z))) +svfloat16_t svminnm_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64))) +float64_t svminnmv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32))) +float32_t svminnmv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16))) +float16_t svminnmv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64))) +float64_t svminv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32))) +float32_t svminv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16))) +float16_t svminv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8))) +int8_t svminv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32))) +int32_t svminv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64))) +int64_t svminv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16))) +int16_t svminv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8))) +uint8_t svminv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32))) +uint32_t svminv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64))) +uint64_t svminv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16))) +uint16_t svminv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m))) +svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m))) +svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m))) +svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x))) +svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x))) +svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x))) +svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z))) +svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z))) +svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z))) +svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m))) +svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m))) +svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m))) +svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m))) +svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m))) +svint8_t svmla_m(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m))) +svint32_t svmla_m(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m))) +svint64_t svmla_m(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m))) +svint16_t svmla_m(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x))) +svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x))) +svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x))) +svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x))) +svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x))) +svint8_t svmla_x(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x))) +svint32_t svmla_x(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x))) +svint64_t svmla_x(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x))) +svint16_t svmla_x(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z))) +svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z))) +svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, 
uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z))) +svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z))) +svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z))) +svint8_t svmla_z(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z))) +svint32_t svmla_z(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z))) +svint64_t svmla_z(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z))) +svint16_t svmla_z(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m))) +svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m))) +svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m))) +svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x))) +svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x))) +svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x))) +svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z))) +svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z))) +svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z))) +svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m))) +svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m))) +svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m))) +svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m))) +svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m))) +svint8_t svmla_m(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m))) +svint32_t svmla_m(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m))) +svint64_t svmla_m(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m))) +svint16_t svmla_m(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x))) +svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x))) +svuint32_t 
svmla_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x))) +svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x))) +svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x))) +svint8_t svmla_x(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x))) +svint32_t svmla_x(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x))) +svint64_t svmla_x(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x))) +svint16_t svmla_x(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z))) +svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z))) +svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z))) +svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z))) +svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z))) +svint8_t svmla_z(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z))) +svint32_t svmla_z(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z))) +svint64_t svmla_z(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z))) +svint16_t svmla_z(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64))) +svfloat64_t svmla_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32))) +svfloat32_t svmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16))) +svfloat16_t svmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m))) +svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m))) +svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m))) +svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x))) +svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x))) +svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x))) +svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z))) +svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z))) +svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z))) +svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m))) +svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m))) +svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m))) +svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m))) +svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m))) +svint8_t svmls_m(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m))) +svint32_t svmls_m(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m))) +svint64_t svmls_m(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m))) +svint16_t svmls_m(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x))) +svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x))) +svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x))) +svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x))) +svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x))) +svint8_t svmls_x(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x))) +svint32_t svmls_x(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x))) +svint64_t svmls_x(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x))) +svint16_t svmls_x(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z))) +svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z))) +svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z))) +svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z))) +svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z))) +svint8_t svmls_z(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z))) +svint32_t svmls_z(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z))) +svint64_t svmls_z(svbool_t, svint64_t, svint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z))) +svint16_t svmls_z(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m))) +svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m))) +svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m))) +svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x))) +svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x))) +svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x))) +svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z))) +svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z))) +svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z))) +svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m))) +svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m))) +svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m))) +svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m))) +svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m))) +svint8_t svmls_m(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m))) +svint32_t svmls_m(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m))) +svint64_t svmls_m(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m))) +svint16_t svmls_m(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x))) +svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x))) +svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x))) +svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x))) +svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x))) +svint8_t svmls_x(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x))) +svint32_t svmls_x(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x))) +svint64_t svmls_x(svbool_t, svint64_t, 
svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x))) +svint16_t svmls_x(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z))) +svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z))) +svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z))) +svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z))) +svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z))) +svint8_t svmls_z(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z))) +svint32_t svmls_z(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z))) +svint64_t svmls_z(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z))) +svint16_t svmls_z(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64))) +svfloat64_t svmls_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32))) +svfloat32_t svmls_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16))) +svfloat16_t svmls_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z))) +svbool_t svmov_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m))) +svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m))) +svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m))) +svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x))) +svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x))) +svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x))) +svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z))) +svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z))) +svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z))) +svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m))) +svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m))) +svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m))) +svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m))) +svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m))) +svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m))) +svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m))) +svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m))) +svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x))) +svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x))) +svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x))) +svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x))) +svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x))) +svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x))) +svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x))) +svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x))) +svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z))) +svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z))) +svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z))) +svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z))) +svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z))) +svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z))) +svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z))) +svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z))) +svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m))) +svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m))) +svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m))) +svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x))) +svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x))) +svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x))) +svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z))) +svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z))) +svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z))) +svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m))) +svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m))) +svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m))) +svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m))) +svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m))) +svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m))) +svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m))) +svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m))) +svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x))) +svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x))) +svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x))) +svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x))) +svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x))) +svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x))) +svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x))) +svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x))) +svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z))) +svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z))) +svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z))) +svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, 
svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z))) +svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z))) +svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z))) +svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z))) +svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z))) +svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m))) +svfloat64_t svmul_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m))) +svfloat32_t svmul_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m))) +svfloat16_t svmul_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x))) +svfloat64_t svmul_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x))) +svfloat32_t svmul_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x))) +svfloat16_t svmul_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z))) +svfloat64_t svmul_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z))) +svfloat32_t svmul_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z))) +svfloat16_t svmul_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m))) +svuint8_t svmul_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m))) +svuint32_t svmul_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m))) +svuint64_t svmul_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m))) +svuint16_t svmul_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m))) +svint8_t svmul_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m))) +svint32_t svmul_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m))) +svint64_t svmul_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m))) +svint16_t svmul_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x))) +svuint8_t svmul_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x))) +svuint32_t svmul_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x))) +svuint64_t svmul_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x))) +svuint16_t svmul_x(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x))) +svint8_t svmul_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x))) +svint32_t svmul_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x))) +svint64_t svmul_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x))) +svint16_t svmul_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z))) +svuint8_t svmul_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z))) +svuint32_t svmul_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z))) +svuint64_t svmul_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z))) +svuint16_t svmul_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z))) +svint8_t svmul_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z))) +svint32_t svmul_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z))) +svint64_t svmul_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z))) +svint16_t svmul_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m))) +svfloat64_t svmul_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m))) +svfloat32_t svmul_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m))) +svfloat16_t svmul_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x))) +svfloat64_t svmul_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x))) +svfloat32_t svmul_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x))) +svfloat16_t svmul_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z))) +svfloat64_t svmul_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z))) +svfloat32_t svmul_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z))) +svfloat16_t svmul_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m))) +svuint8_t svmul_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m))) +svuint32_t svmul_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m))) +svuint64_t svmul_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m))) +svuint16_t svmul_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m))) +svint8_t svmul_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m))) +svint32_t svmul_m(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m))) +svint64_t svmul_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m))) +svint16_t svmul_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x))) +svuint8_t svmul_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x))) +svuint32_t svmul_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x))) +svuint64_t svmul_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x))) +svuint16_t svmul_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x))) +svint8_t svmul_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x))) +svint32_t svmul_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x))) +svint64_t svmul_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x))) +svint16_t svmul_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z))) +svuint8_t svmul_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z))) +svuint32_t svmul_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z))) +svuint64_t svmul_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z))) +svuint16_t svmul_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z))) +svint8_t svmul_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z))) +svint32_t svmul_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z))) +svint64_t svmul_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z))) +svint16_t svmul_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64))) +svfloat64_t svmul_lane(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32))) +svfloat32_t svmul_lane(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16))) +svfloat16_t svmul_lane(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m))) +svint8_t svmulh_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m))) +svint32_t svmulh_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m))) +svint64_t svmulh_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m))) +svint16_t svmulh_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x))) +svint8_t svmulh_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x))) +svint32_t svmulh_x(svbool_t, svint32_t, int32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x))) +svint64_t svmulh_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x))) +svint16_t svmulh_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z))) +svint8_t svmulh_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z))) +svint32_t svmulh_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z))) +svint64_t svmulh_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z))) +svint16_t svmulh_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m))) +svuint8_t svmulh_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m))) +svuint32_t svmulh_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m))) +svuint64_t svmulh_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m))) +svuint16_t svmulh_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x))) +svuint8_t svmulh_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x))) +svuint32_t svmulh_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x))) +svuint64_t svmulh_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x))) +svuint16_t svmulh_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z))) +svuint8_t svmulh_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z))) +svuint32_t svmulh_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z))) +svuint64_t svmulh_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z))) +svuint16_t svmulh_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m))) +svint8_t svmulh_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m))) +svint32_t svmulh_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m))) +svint64_t svmulh_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m))) +svint16_t svmulh_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x))) +svint8_t svmulh_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x))) +svint32_t svmulh_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x))) +svint64_t svmulh_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x))) +svint16_t svmulh_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z))) +svint8_t svmulh_z(svbool_t, svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z))) +svint32_t svmulh_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z))) +svint64_t svmulh_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z))) +svint16_t svmulh_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m))) +svuint8_t svmulh_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m))) +svuint32_t svmulh_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m))) +svuint64_t svmulh_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m))) +svuint16_t svmulh_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x))) +svuint8_t svmulh_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x))) +svuint32_t svmulh_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x))) +svuint64_t svmulh_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x))) +svuint16_t svmulh_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z))) +svuint8_t svmulh_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z))) +svuint32_t svmulh_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z))) +svuint64_t svmulh_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z))) +svuint16_t svmulh_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m))) +svfloat64_t svmulx_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m))) +svfloat32_t svmulx_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m))) +svfloat16_t svmulx_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x))) +svfloat64_t svmulx_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x))) +svfloat32_t svmulx_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x))) +svfloat16_t svmulx_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z))) +svfloat64_t svmulx_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z))) +svfloat32_t svmulx_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z))) +svfloat16_t svmulx_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m))) +svfloat64_t svmulx_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m))) +svfloat32_t svmulx_m(svbool_t, svfloat32_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m))) +svfloat16_t svmulx_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x))) +svfloat64_t svmulx_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x))) +svfloat32_t svmulx_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x))) +svfloat16_t svmulx_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z))) +svfloat64_t svmulx_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z))) +svfloat32_t svmulx_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z))) +svfloat16_t svmulx_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z))) +svbool_t svnand_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m))) +svfloat64_t svneg_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m))) +svfloat32_t svneg_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m))) +svfloat16_t svneg_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x))) +svfloat64_t svneg_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x))) +svfloat32_t svneg_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x))) +svfloat16_t svneg_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z))) +svfloat64_t svneg_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z))) +svfloat32_t svneg_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z))) +svfloat16_t svneg_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m))) +svint8_t svneg_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m))) +svint32_t svneg_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m))) +svint64_t svneg_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m))) +svint16_t svneg_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x))) +svint8_t svneg_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x))) +svint32_t svneg_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x))) +svint64_t svneg_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x))) +svint16_t svneg_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z))) +svint8_t svneg_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z))) +svint32_t svneg_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z))) +svint64_t svneg_z(svbool_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z))) +svint16_t svneg_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m))) +svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m))) +svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m))) +svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x))) +svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x))) +svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x))) +svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z))) +svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z))) +svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z))) +svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m))) +svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m))) +svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m))) +svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x))) +svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x))) +svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x))) +svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z))) +svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z))) +svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z))) +svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m))) +svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m))) +svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m))) +svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x))) +svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x))) +svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, 
float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x))) +svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z))) +svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z))) +svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z))) +svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m))) +svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m))) +svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m))) +svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x))) +svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x))) +svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x))) +svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z))) +svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z))) +svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z))) +svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m))) +svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m))) +svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m))) +svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x))) +svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x))) +svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x))) +svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z))) +svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z))) +svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z))) +svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m))) +svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m))) +svfloat32_t 
svnmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m))) +svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x))) +svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x))) +svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x))) +svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z))) +svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z))) +svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z))) +svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m))) +svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m))) +svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m))) +svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x))) +svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x))) +svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x))) +svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z))) +svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z))) +svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z))) +svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m))) +svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m))) +svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m))) +svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x))) +svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x))) +svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x))) +svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z))) +svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z))) +svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z))) +svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z))) +svbool_t svnor_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z))) +svbool_t svnot_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m))) +svuint8_t svnot_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m))) +svuint32_t svnot_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m))) +svuint64_t svnot_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m))) +svuint16_t svnot_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m))) +svint8_t svnot_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m))) +svint32_t svnot_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m))) +svint64_t svnot_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m))) +svint16_t svnot_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x))) +svuint8_t svnot_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x))) +svuint32_t svnot_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x))) +svuint64_t svnot_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x))) +svuint16_t svnot_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x))) +svint8_t svnot_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x))) +svint32_t svnot_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x))) +svint64_t svnot_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x))) +svint16_t svnot_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z))) +svuint8_t svnot_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z))) +svuint32_t svnot_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z))) +svuint64_t svnot_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z))) +svuint16_t svnot_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z))) +svint8_t svnot_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z))) +svint32_t svnot_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z))) +svint64_t svnot_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z))) +svint16_t svnot_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z))) +svbool_t 
svorn_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z))) +svbool_t svorr_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m))) +svuint8_t svorr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m))) +svuint32_t svorr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m))) +svuint64_t svorr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m))) +svuint16_t svorr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m))) +svint8_t svorr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m))) +svint32_t svorr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m))) +svint64_t svorr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m))) +svint16_t svorr_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x))) +svuint8_t svorr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x))) +svuint32_t svorr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x))) +svuint64_t svorr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x))) +svuint16_t svorr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x))) +svint8_t svorr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x))) +svint32_t svorr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x))) +svint64_t svorr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x))) +svint16_t svorr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z))) +svuint8_t svorr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z))) +svuint32_t svorr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z))) +svuint64_t svorr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z))) +svuint16_t svorr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z))) +svint8_t svorr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z))) +svint32_t svorr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z))) +svint64_t svorr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z))) +svint16_t svorr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m))) +svuint8_t svorr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m))) +svuint32_t svorr_m(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m))) +svuint64_t svorr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m))) +svuint16_t svorr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m))) +svint8_t svorr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m))) +svint32_t svorr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m))) +svint64_t svorr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m))) +svint16_t svorr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x))) +svuint8_t svorr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x))) +svuint32_t svorr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x))) +svuint64_t svorr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x))) +svuint16_t svorr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x))) +svint8_t svorr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x))) +svint32_t svorr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x))) +svint64_t svorr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x))) +svint16_t svorr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z))) +svuint8_t svorr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z))) +svuint32_t svorr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z))) +svuint64_t svorr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z))) +svuint16_t svorr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z))) +svint8_t svorr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z))) +svint32_t svorr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z))) +svint64_t svorr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z))) +svint16_t svorr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8))) +uint8_t svorv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32))) +uint32_t svorv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64))) +uint64_t svorv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16))) +uint16_t svorv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8))) +int8_t svorv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32))) +int32_t svorv(svbool_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64))) +int64_t svorv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16))) +int16_t svorv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b))) +svbool_t svpfalse(void); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b))) +svbool_t svpfirst(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8))) +svint8_t svqadd(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32))) +svint32_t svqadd(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64))) +svint64_t svqadd(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16))) +svint16_t svqadd(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8))) +svuint8_t svqadd(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32))) +svuint32_t svqadd(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64))) +svuint64_t svqadd(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16))) +svuint16_t svqadd(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8))) +svint8_t svqadd(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32))) +svint32_t svqadd(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64))) +svint64_t svqadd(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16))) +svint16_t svqadd(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8))) +svuint8_t svqadd(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32))) +svuint32_t svqadd(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64))) +svuint64_t svqadd(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16))) +svuint16_t svqadd(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32))) +int32_t svqdecb(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64))) +int64_t svqdecb(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32))) +uint32_t svqdecb(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64))) +uint64_t svqdecb(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32))) +int32_t svqdecb_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64))) +int64_t svqdecb_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32))) +uint32_t svqdecb_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64))) +uint64_t svqdecb_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32))) +int32_t svqdecd(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64))) +int64_t 
svqdecd(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32))) +uint32_t svqdecd(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64))) +uint64_t svqdecd(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64))) +svint64_t svqdecd(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64))) +svuint64_t svqdecd(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32))) +int32_t svqdecd_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64))) +int64_t svqdecd_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32))) +uint32_t svqdecd_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64))) +uint64_t svqdecd_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64))) +svint64_t svqdecd_pat(svint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64))) +svuint64_t svqdecd_pat(svuint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32))) +int32_t svqdech(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64))) +int64_t svqdech(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32))) +uint32_t svqdech(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64))) +uint64_t svqdech(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16))) +svint16_t svqdech(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16))) +svuint16_t svqdech(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32))) +int32_t svqdech_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64))) +int64_t svqdech_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32))) +uint32_t svqdech_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64))) +uint64_t svqdech_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16))) +svint16_t svqdech_pat(svint16_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16))) +svuint16_t svqdech_pat(svuint16_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8))) +int32_t svqdecp_b8(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32))) +int32_t svqdecp_b32(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64))) +int32_t svqdecp_b64(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16))) +int32_t svqdecp_b16(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8))) +int64_t svqdecp_b8(int64_t, svbool_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32))) +int64_t svqdecp_b32(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64))) +int64_t svqdecp_b64(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16))) +int64_t svqdecp_b16(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8))) +uint32_t svqdecp_b8(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32))) +uint32_t svqdecp_b32(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64))) +uint32_t svqdecp_b64(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16))) +uint32_t svqdecp_b16(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8))) +uint64_t svqdecp_b8(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32))) +uint64_t svqdecp_b32(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64))) +uint64_t svqdecp_b64(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16))) +uint64_t svqdecp_b16(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32))) +svint32_t svqdecp(svint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64))) +svint64_t svqdecp(svint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16))) +svint16_t svqdecp(svint16_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32))) +svuint32_t svqdecp(svuint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64))) +svuint64_t svqdecp(svuint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16))) +svuint16_t svqdecp(svuint16_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32))) +int32_t svqdecw(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64))) +int64_t svqdecw(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32))) +uint32_t svqdecw(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64))) +uint64_t svqdecw(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32))) +svint32_t svqdecw(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32))) +svuint32_t svqdecw(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32))) +int32_t svqdecw_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64))) +int64_t svqdecw_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32))) +uint32_t svqdecw_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64))) +uint64_t svqdecw_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32))) +svint32_t svqdecw_pat(svint32_t, enum svpattern, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32))) +svuint32_t svqdecw_pat(svuint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32))) +int32_t svqincb(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64))) +int64_t svqincb(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32))) +uint32_t svqincb(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64))) +uint64_t svqincb(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32))) +int32_t svqincb_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64))) +int64_t svqincb_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32))) +uint32_t svqincb_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64))) +uint64_t svqincb_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32))) +int32_t svqincd(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64))) +int64_t svqincd(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32))) +uint32_t svqincd(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64))) +uint64_t svqincd(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64))) +svint64_t svqincd(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64))) +svuint64_t svqincd(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32))) +int32_t svqincd_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64))) +int64_t svqincd_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32))) +uint32_t svqincd_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64))) +uint64_t svqincd_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64))) +svint64_t svqincd_pat(svint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64))) +svuint64_t svqincd_pat(svuint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32))) +int32_t svqinch(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64))) +int64_t svqinch(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32))) +uint32_t svqinch(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64))) +uint64_t svqinch(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16))) +svint16_t svqinch(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16))) +svuint16_t svqinch(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32))) +int32_t svqinch_pat(int32_t, enum 
svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64))) +int64_t svqinch_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32))) +uint32_t svqinch_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64))) +uint64_t svqinch_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16))) +svint16_t svqinch_pat(svint16_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16))) +svuint16_t svqinch_pat(svuint16_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8))) +int32_t svqincp_b8(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32))) +int32_t svqincp_b32(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64))) +int32_t svqincp_b64(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16))) +int32_t svqincp_b16(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8))) +int64_t svqincp_b8(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32))) +int64_t svqincp_b32(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64))) +int64_t svqincp_b64(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16))) +int64_t svqincp_b16(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8))) +uint32_t svqincp_b8(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32))) +uint32_t svqincp_b32(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64))) +uint32_t svqincp_b64(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16))) +uint32_t svqincp_b16(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8))) +uint64_t svqincp_b8(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32))) +uint64_t svqincp_b32(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64))) +uint64_t svqincp_b64(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16))) +uint64_t svqincp_b16(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32))) +svint32_t svqincp(svint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64))) +svint64_t svqincp(svint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16))) +svint16_t svqincp(svint16_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32))) +svuint32_t svqincp(svuint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64))) +svuint64_t svqincp(svuint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16))) +svuint16_t svqincp(svuint16_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32))) +int32_t svqincw(int32_t, uint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64))) +int64_t svqincw(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32))) +uint32_t svqincw(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64))) +uint64_t svqincw(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32))) +svint32_t svqincw(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32))) +svuint32_t svqincw(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32))) +int32_t svqincw_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64))) +int64_t svqincw_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32))) +uint32_t svqincw_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64))) +uint64_t svqincw_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32))) +svint32_t svqincw_pat(svint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32))) +svuint32_t svqincw_pat(svuint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8))) +svint8_t svqsub(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32))) +svint32_t svqsub(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64))) +svint64_t svqsub(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16))) +svint16_t svqsub(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8))) +svuint8_t svqsub(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32))) +svuint32_t svqsub(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64))) +svuint64_t svqsub(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16))) +svuint16_t svqsub(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8))) +svint8_t svqsub(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32))) +svint32_t svqsub(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64))) +svint64_t svqsub(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16))) +svint16_t svqsub(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8))) +svuint8_t svqsub(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32))) +svuint32_t svqsub(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64))) +svuint64_t svqsub(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16))) +svuint16_t svqsub(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m))) +svuint8_t svrbit_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m))) +svuint32_t 
svrbit_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m))) +svuint64_t svrbit_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m))) +svuint16_t svrbit_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m))) +svint8_t svrbit_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m))) +svint32_t svrbit_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m))) +svint64_t svrbit_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m))) +svint16_t svrbit_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x))) +svuint8_t svrbit_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x))) +svuint32_t svrbit_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x))) +svuint64_t svrbit_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x))) +svuint16_t svrbit_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x))) +svint8_t svrbit_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x))) +svint32_t svrbit_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x))) +svint64_t svrbit_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x))) +svint16_t svrbit_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z))) +svuint8_t svrbit_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z))) +svuint32_t svrbit_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z))) +svuint64_t svrbit_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z))) +svuint16_t svrbit_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z))) +svint8_t svrbit_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z))) +svint32_t svrbit_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z))) +svint64_t svrbit_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z))) +svint16_t svrbit_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64))) +svfloat64_t svrecpe(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32))) +svfloat32_t svrecpe(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16))) +svfloat16_t svrecpe(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64))) +svfloat64_t svrecps(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32))) +svfloat32_t svrecps(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16))) +svfloat16_t svrecps(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m))) 
+svfloat64_t svrecpx_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m))) +svfloat32_t svrecpx_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m))) +svfloat16_t svrecpx_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x))) +svfloat64_t svrecpx_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x))) +svfloat32_t svrecpx_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x))) +svfloat16_t svrecpx_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z))) +svfloat64_t svrecpx_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z))) +svfloat32_t svrecpx_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z))) +svfloat16_t svrecpx_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8))) +svuint8_t svrev(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32))) +svuint32_t svrev(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64))) +svuint64_t svrev(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16))) +svuint16_t svrev(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8))) +svint8_t svrev(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64))) +svfloat64_t svrev(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32))) +svfloat32_t svrev(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16))) +svfloat16_t svrev(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32))) +svint32_t svrev(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64))) +svint64_t svrev(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16))) +svint16_t svrev(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m))) +svuint32_t svrevb_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m))) +svuint64_t svrevb_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m))) +svuint16_t svrevb_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m))) +svint32_t svrevb_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m))) +svint64_t svrevb_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m))) +svint16_t svrevb_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x))) +svuint32_t svrevb_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x))) +svuint64_t svrevb_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x))) +svuint16_t svrevb_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x))) +svint32_t svrevb_x(svbool_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x))) +svint64_t svrevb_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x))) +svint16_t svrevb_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z))) +svuint32_t svrevb_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z))) +svuint64_t svrevb_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z))) +svuint16_t svrevb_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z))) +svint32_t svrevb_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z))) +svint64_t svrevb_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z))) +svint16_t svrevb_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m))) +svuint32_t svrevh_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m))) +svuint64_t svrevh_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m))) +svint32_t svrevh_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m))) +svint64_t svrevh_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x))) +svuint32_t svrevh_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x))) +svuint64_t svrevh_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x))) +svint32_t svrevh_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x))) +svint64_t svrevh_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z))) +svuint32_t svrevh_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z))) +svuint64_t svrevh_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z))) +svint32_t svrevh_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z))) +svint64_t svrevh_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m))) +svuint64_t svrevw_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m))) +svint64_t svrevw_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x))) +svuint64_t svrevw_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x))) +svint64_t svrevw_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z))) +svuint64_t svrevw_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z))) +svint64_t svrevw_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m))) +svfloat64_t svrinta_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m))) +svfloat32_t svrinta_m(svfloat32_t, svbool_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m))) +svfloat16_t svrinta_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x))) +svfloat64_t svrinta_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x))) +svfloat32_t svrinta_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x))) +svfloat16_t svrinta_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z))) +svfloat64_t svrinta_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z))) +svfloat32_t svrinta_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z))) +svfloat16_t svrinta_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m))) +svfloat64_t svrinti_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m))) +svfloat32_t svrinti_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m))) +svfloat16_t svrinti_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x))) +svfloat64_t svrinti_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x))) +svfloat32_t svrinti_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x))) +svfloat16_t svrinti_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z))) +svfloat64_t svrinti_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z))) +svfloat32_t svrinti_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z))) +svfloat16_t svrinti_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m))) +svfloat64_t svrintm_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m))) +svfloat32_t svrintm_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m))) +svfloat16_t svrintm_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x))) +svfloat64_t svrintm_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x))) +svfloat32_t svrintm_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x))) +svfloat16_t svrintm_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z))) +svfloat64_t svrintm_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z))) +svfloat32_t svrintm_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z))) +svfloat16_t svrintm_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m))) +svfloat64_t svrintn_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m))) +svfloat32_t svrintn_m(svfloat32_t, svbool_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m))) +svfloat16_t svrintn_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x))) +svfloat64_t svrintn_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x))) +svfloat32_t svrintn_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x))) +svfloat16_t svrintn_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z))) +svfloat64_t svrintn_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z))) +svfloat32_t svrintn_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z))) +svfloat16_t svrintn_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m))) +svfloat64_t svrintp_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m))) +svfloat32_t svrintp_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m))) +svfloat16_t svrintp_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x))) +svfloat64_t svrintp_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x))) +svfloat32_t svrintp_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x))) +svfloat16_t svrintp_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z))) +svfloat64_t svrintp_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z))) +svfloat32_t svrintp_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z))) +svfloat16_t svrintp_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m))) +svfloat64_t svrintx_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m))) +svfloat32_t svrintx_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m))) +svfloat16_t svrintx_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x))) +svfloat64_t svrintx_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x))) +svfloat32_t svrintx_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x))) +svfloat16_t svrintx_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z))) +svfloat64_t svrintx_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z))) +svfloat32_t svrintx_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z))) +svfloat16_t svrintx_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m))) +svfloat64_t svrintz_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m))) +svfloat32_t svrintz_m(svfloat32_t, svbool_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m))) +svfloat16_t svrintz_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x))) +svfloat64_t svrintz_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x))) +svfloat32_t svrintz_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x))) +svfloat16_t svrintz_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z))) +svfloat64_t svrintz_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z))) +svfloat32_t svrintz_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z))) +svfloat16_t svrintz_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64))) +svfloat64_t svrsqrte(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32))) +svfloat32_t svrsqrte(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16))) +svfloat16_t svrsqrte(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64))) +svfloat64_t svrsqrts(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32))) +svfloat32_t svrsqrts(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16))) +svfloat16_t svrsqrts(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m))) +svfloat64_t svscale_m(svbool_t, svfloat64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m))) +svfloat32_t svscale_m(svbool_t, svfloat32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m))) +svfloat16_t svscale_m(svbool_t, svfloat16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x))) +svfloat64_t svscale_x(svbool_t, svfloat64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x))) +svfloat32_t svscale_x(svbool_t, svfloat32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x))) +svfloat16_t svscale_x(svbool_t, svfloat16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z))) +svfloat64_t svscale_z(svbool_t, svfloat64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z))) +svfloat32_t svscale_z(svbool_t, svfloat32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z))) +svfloat16_t svscale_z(svbool_t, svfloat16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m))) +svfloat64_t svscale_m(svbool_t, svfloat64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m))) +svfloat32_t svscale_m(svbool_t, svfloat32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m))) +svfloat16_t svscale_m(svbool_t, svfloat16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x))) +svfloat64_t svscale_x(svbool_t, svfloat64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x))) +svfloat32_t svscale_x(svbool_t, svfloat32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x))) +svfloat16_t svscale_x(svbool_t, svfloat16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z))) +svfloat64_t svscale_z(svbool_t, svfloat64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z))) +svfloat32_t svscale_z(svbool_t, svfloat32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z))) +svfloat16_t svscale_z(svbool_t, svfloat16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b))) +svbool_t svsel(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8))) +svuint8_t svsel(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32))) +svuint32_t svsel(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64))) +svuint64_t svsel(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16))) +svuint16_t svsel(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8))) +svint8_t svsel(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64))) +svfloat64_t svsel(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32))) +svfloat32_t svsel(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16))) +svfloat16_t svsel(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32))) +svint32_t svsel(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64))) +svint64_t svsel(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16))) +svint16_t svsel(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8))) +svuint8x2_t svset2(svuint8x2_t, uint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32))) +svuint32x2_t svset2(svuint32x2_t, uint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64))) +svuint64x2_t svset2(svuint64x2_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16))) +svuint16x2_t svset2(svuint16x2_t, uint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8))) +svint8x2_t svset2(svint8x2_t, uint64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64))) +svfloat64x2_t svset2(svfloat64x2_t, uint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32))) +svfloat32x2_t svset2(svfloat32x2_t, uint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16))) +svfloat16x2_t svset2(svfloat16x2_t, uint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32))) +svint32x2_t svset2(svint32x2_t, uint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64))) +svint64x2_t svset2(svint64x2_t, uint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16))) +svint16x2_t svset2(svint16x2_t, uint64_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8))) +svuint8x3_t svset3(svuint8x3_t, uint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32))) +svuint32x3_t svset3(svuint32x3_t, uint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64))) +svuint64x3_t svset3(svuint64x3_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16))) +svuint16x3_t svset3(svuint16x3_t, uint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8))) +svint8x3_t svset3(svint8x3_t, uint64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64))) +svfloat64x3_t svset3(svfloat64x3_t, uint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32))) +svfloat32x3_t svset3(svfloat32x3_t, uint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16))) +svfloat16x3_t svset3(svfloat16x3_t, uint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32))) +svint32x3_t svset3(svint32x3_t, uint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64))) +svint64x3_t svset3(svint64x3_t, uint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16))) +svint16x3_t svset3(svint16x3_t, uint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8))) +svuint8x4_t svset4(svuint8x4_t, uint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32))) +svuint32x4_t svset4(svuint32x4_t, uint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64))) +svuint64x4_t svset4(svuint64x4_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16))) +svuint16x4_t svset4(svuint16x4_t, uint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8))) +svint8x4_t svset4(svint8x4_t, uint64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64))) +svfloat64x4_t svset4(svfloat64x4_t, uint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32))) +svfloat32x4_t svset4(svfloat32x4_t, uint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16))) +svfloat16x4_t svset4(svfloat16x4_t, uint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32))) +svint32x4_t svset4(svint32x4_t, uint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64))) +svint64x4_t svset4(svint64x4_t, uint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16))) +svint16x4_t svset4(svint16x4_t, uint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8))) +svuint8_t svsplice(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32))) +svuint32_t svsplice(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64))) +svuint64_t svsplice(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16))) +svuint16_t svsplice(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8))) +svint8_t 
svsplice(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64))) +svfloat64_t svsplice(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32))) +svfloat32_t svsplice(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16))) +svfloat16_t svsplice(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32))) +svint32_t svsplice(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64))) +svint64_t svsplice(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16))) +svint16_t svsplice(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m))) +svfloat64_t svsqrt_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m))) +svfloat32_t svsqrt_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m))) +svfloat16_t svsqrt_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x))) +svfloat64_t svsqrt_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x))) +svfloat32_t svsqrt_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x))) +svfloat16_t svsqrt_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z))) +svfloat64_t svsqrt_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z))) +svfloat32_t svsqrt_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z))) +svfloat16_t svsqrt_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8))) +void svst1(svbool_t, uint8_t *, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32))) +void svst1(svbool_t, uint32_t *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64))) +void svst1(svbool_t, uint64_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16))) +void svst1(svbool_t, uint16_t *, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8))) +void svst1(svbool_t, int8_t *, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64))) +void svst1(svbool_t, float64_t *, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32))) +void svst1(svbool_t, float32_t *, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16))) +void svst1(svbool_t, float16_t *, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32))) +void svst1(svbool_t, int32_t *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64))) +void svst1(svbool_t, int64_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16))) +void svst1(svbool_t, int16_t *, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8))) +void svst1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32))) 
+void svst1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64))) +void svst1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16))) +void svst1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8))) +void svst1_vnum(svbool_t, int8_t *, int64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64))) +void svst1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32))) +void svst1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16))) +void svst1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32))) +void svst1_vnum(svbool_t, int32_t *, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64))) +void svst1_vnum(svbool_t, int64_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16))) +void svst1_vnum(svbool_t, int16_t *, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32))) +void svst1b(svbool_t, int8_t *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64))) +void svst1b(svbool_t, int8_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16))) +void svst1b(svbool_t, int8_t *, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32))) +void svst1b(svbool_t, uint8_t *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64))) +void svst1b(svbool_t, uint8_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16))) +void svst1b(svbool_t, uint8_t *, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32))) +void svst1b_vnum(svbool_t, int8_t *, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64))) +void svst1b_vnum(svbool_t, int8_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16))) +void svst1b_vnum(svbool_t, int8_t *, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32))) +void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64))) +void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16))) +void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32))) +void svst1h(svbool_t, int16_t *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64))) +void svst1h(svbool_t, int16_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32))) +void svst1h(svbool_t, uint16_t *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64))) +void svst1h(svbool_t, uint16_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32))) +void svst1h_vnum(svbool_t, int16_t *, int64_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64))) +void svst1h_vnum(svbool_t, int16_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32))) +void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64))) +void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64))) +void svst1w(svbool_t, int32_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64))) +void svst1w(svbool_t, uint32_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64))) +void svst1w_vnum(svbool_t, int32_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64))) +void svst1w_vnum(svbool_t, uint32_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8))) +void svst2(svbool_t, uint8_t *, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32))) +void svst2(svbool_t, uint32_t *, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64))) +void svst2(svbool_t, uint64_t *, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16))) +void svst2(svbool_t, uint16_t *, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8))) +void svst2(svbool_t, int8_t *, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64))) +void svst2(svbool_t, float64_t *, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32))) +void svst2(svbool_t, float32_t *, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16))) +void svst2(svbool_t, float16_t *, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32))) +void svst2(svbool_t, int32_t *, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64))) +void svst2(svbool_t, int64_t *, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16))) +void svst2(svbool_t, int16_t *, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8))) +void svst2_vnum(svbool_t, uint8_t *, int64_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32))) +void svst2_vnum(svbool_t, uint32_t *, int64_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64))) +void svst2_vnum(svbool_t, uint64_t *, int64_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16))) +void svst2_vnum(svbool_t, uint16_t *, int64_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8))) +void svst2_vnum(svbool_t, int8_t *, int64_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64))) +void svst2_vnum(svbool_t, float64_t *, int64_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32))) +void svst2_vnum(svbool_t, float32_t *, int64_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16))) +void svst2_vnum(svbool_t, float16_t *, int64_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32))) +void svst2_vnum(svbool_t, 
int32_t *, int64_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64))) +void svst2_vnum(svbool_t, int64_t *, int64_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16))) +void svst2_vnum(svbool_t, int16_t *, int64_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8))) +void svst3(svbool_t, uint8_t *, svuint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32))) +void svst3(svbool_t, uint32_t *, svuint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64))) +void svst3(svbool_t, uint64_t *, svuint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16))) +void svst3(svbool_t, uint16_t *, svuint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8))) +void svst3(svbool_t, int8_t *, svint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64))) +void svst3(svbool_t, float64_t *, svfloat64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32))) +void svst3(svbool_t, float32_t *, svfloat32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16))) +void svst3(svbool_t, float16_t *, svfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32))) +void svst3(svbool_t, int32_t *, svint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64))) +void svst3(svbool_t, int64_t *, svint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16))) +void svst3(svbool_t, int16_t *, svint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8))) +void svst3_vnum(svbool_t, uint8_t *, int64_t, svuint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32))) +void svst3_vnum(svbool_t, uint32_t *, int64_t, svuint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64))) +void svst3_vnum(svbool_t, uint64_t *, int64_t, svuint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16))) +void svst3_vnum(svbool_t, uint16_t *, int64_t, svuint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8))) +void svst3_vnum(svbool_t, int8_t *, int64_t, svint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64))) +void svst3_vnum(svbool_t, float64_t *, int64_t, svfloat64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32))) +void svst3_vnum(svbool_t, float32_t *, int64_t, svfloat32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16))) +void svst3_vnum(svbool_t, float16_t *, int64_t, svfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32))) +void svst3_vnum(svbool_t, int32_t *, int64_t, svint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64))) +void svst3_vnum(svbool_t, int64_t *, int64_t, svint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16))) +void svst3_vnum(svbool_t, int16_t *, int64_t, svint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8))) +void svst4(svbool_t, uint8_t *, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32))) +void svst4(svbool_t, uint32_t *, svuint32x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64))) +void svst4(svbool_t, uint64_t *, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16))) +void svst4(svbool_t, uint16_t *, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8))) +void svst4(svbool_t, int8_t *, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64))) +void svst4(svbool_t, float64_t *, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32))) +void svst4(svbool_t, float32_t *, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16))) +void svst4(svbool_t, float16_t *, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32))) +void svst4(svbool_t, int32_t *, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64))) +void svst4(svbool_t, int64_t *, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16))) +void svst4(svbool_t, int16_t *, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8))) +void svst4_vnum(svbool_t, uint8_t *, int64_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32))) +void svst4_vnum(svbool_t, uint32_t *, int64_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64))) +void svst4_vnum(svbool_t, uint64_t *, int64_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16))) +void svst4_vnum(svbool_t, uint16_t *, int64_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8))) +void svst4_vnum(svbool_t, int8_t *, int64_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64))) +void svst4_vnum(svbool_t, float64_t *, int64_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32))) +void svst4_vnum(svbool_t, float32_t *, int64_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16))) +void svst4_vnum(svbool_t, float16_t *, int64_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32))) +void svst4_vnum(svbool_t, int32_t *, int64_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64))) +void svst4_vnum(svbool_t, int64_t *, int64_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16))) +void svst4_vnum(svbool_t, int16_t *, int64_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8))) +void svstnt1(svbool_t, uint8_t *, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32))) +void svstnt1(svbool_t, uint32_t *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64))) +void svstnt1(svbool_t, uint64_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16))) +void svstnt1(svbool_t, uint16_t *, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8))) +void svstnt1(svbool_t, int8_t *, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64))) +void svstnt1(svbool_t, float64_t *, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32))) +void svstnt1(svbool_t, float32_t *, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16))) +void svstnt1(svbool_t, float16_t *, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32))) +void svstnt1(svbool_t, int32_t *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64))) +void svstnt1(svbool_t, int64_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16))) +void svstnt1(svbool_t, int16_t *, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8))) +void svstnt1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32))) +void svstnt1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64))) +void svstnt1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16))) +void svstnt1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8))) +void svstnt1_vnum(svbool_t, int8_t *, int64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64))) +void svstnt1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32))) +void svstnt1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16))) +void svstnt1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32))) +void svstnt1_vnum(svbool_t, int32_t *, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64))) +void svstnt1_vnum(svbool_t, int64_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16))) +void svstnt1_vnum(svbool_t, int16_t *, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m))) +svfloat64_t svsub_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m))) +svfloat32_t svsub_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m))) +svfloat16_t svsub_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x))) +svfloat64_t svsub_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x))) +svfloat32_t svsub_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x))) +svfloat16_t svsub_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z))) +svfloat64_t svsub_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z))) +svfloat32_t svsub_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z))) +svfloat16_t svsub_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m))) +svuint8_t svsub_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m))) +svuint32_t svsub_m(svbool_t, svuint32_t, uint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m))) +svuint64_t svsub_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m))) +svuint16_t svsub_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m))) +svint8_t svsub_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m))) +svint32_t svsub_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m))) +svint64_t svsub_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m))) +svint16_t svsub_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x))) +svuint8_t svsub_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x))) +svuint32_t svsub_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x))) +svuint64_t svsub_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x))) +svuint16_t svsub_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x))) +svint8_t svsub_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x))) +svint32_t svsub_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x))) +svint64_t svsub_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x))) +svint16_t svsub_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z))) +svuint8_t svsub_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z))) +svuint32_t svsub_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z))) +svuint64_t svsub_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z))) +svuint16_t svsub_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z))) +svint8_t svsub_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z))) +svint32_t svsub_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z))) +svint64_t svsub_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z))) +svint16_t svsub_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m))) +svfloat64_t svsub_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m))) +svfloat32_t svsub_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m))) +svfloat16_t svsub_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x))) +svfloat64_t svsub_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x))) +svfloat32_t svsub_x(svbool_t, svfloat32_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x))) +svfloat16_t svsub_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z))) +svfloat64_t svsub_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z))) +svfloat32_t svsub_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z))) +svfloat16_t svsub_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m))) +svuint8_t svsub_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m))) +svuint32_t svsub_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m))) +svuint64_t svsub_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m))) +svuint16_t svsub_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m))) +svint8_t svsub_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m))) +svint32_t svsub_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m))) +svint64_t svsub_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m))) +svint16_t svsub_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x))) +svuint8_t svsub_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x))) +svuint32_t svsub_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x))) +svuint64_t svsub_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x))) +svuint16_t svsub_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x))) +svint8_t svsub_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x))) +svint32_t svsub_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x))) +svint64_t svsub_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x))) +svint16_t svsub_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z))) +svuint8_t svsub_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z))) +svuint32_t svsub_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z))) +svuint64_t svsub_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z))) +svuint16_t svsub_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z))) +svint8_t svsub_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z))) +svint32_t svsub_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z))) +svint64_t svsub_z(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z))) +svint16_t svsub_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m))) +svfloat64_t svsubr_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m))) +svfloat32_t svsubr_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m))) +svfloat16_t svsubr_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x))) +svfloat64_t svsubr_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x))) +svfloat32_t svsubr_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x))) +svfloat16_t svsubr_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z))) +svfloat64_t svsubr_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z))) +svfloat32_t svsubr_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z))) +svfloat16_t svsubr_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m))) +svuint8_t svsubr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m))) +svuint32_t svsubr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m))) +svuint64_t svsubr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m))) +svuint16_t svsubr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m))) +svint8_t svsubr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m))) +svint32_t svsubr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m))) +svint64_t svsubr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m))) +svint16_t svsubr_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x))) +svuint8_t svsubr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x))) +svuint32_t svsubr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x))) +svuint64_t svsubr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x))) +svuint16_t svsubr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x))) +svint8_t svsubr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x))) +svint32_t svsubr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x))) +svint64_t svsubr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x))) +svint16_t svsubr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z))) +svuint8_t 
svsubr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z))) +svuint32_t svsubr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z))) +svuint64_t svsubr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z))) +svuint16_t svsubr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z))) +svint8_t svsubr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z))) +svint32_t svsubr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z))) +svint64_t svsubr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z))) +svint16_t svsubr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m))) +svfloat64_t svsubr_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m))) +svfloat32_t svsubr_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m))) +svfloat16_t svsubr_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x))) +svfloat64_t svsubr_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x))) +svfloat32_t svsubr_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x))) +svfloat16_t svsubr_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z))) +svfloat64_t svsubr_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z))) +svfloat32_t svsubr_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z))) +svfloat16_t svsubr_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m))) +svuint8_t svsubr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m))) +svuint32_t svsubr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m))) +svuint64_t svsubr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m))) +svuint16_t svsubr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m))) +svint8_t svsubr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m))) +svint32_t svsubr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m))) +svint64_t svsubr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m))) +svint16_t svsubr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x))) +svuint8_t svsubr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x))) +svuint32_t svsubr_x(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x))) +svuint64_t svsubr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x))) +svuint16_t svsubr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x))) +svint8_t svsubr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x))) +svint32_t svsubr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x))) +svint64_t svsubr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x))) +svint16_t svsubr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z))) +svuint8_t svsubr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z))) +svuint32_t svsubr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z))) +svuint64_t svsubr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z))) +svuint16_t svsubr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z))) +svint8_t svsubr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z))) +svint32_t svsubr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z))) +svint64_t svsubr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z))) +svint16_t svsubr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8))) +svuint8_t svtbl(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32))) +svuint32_t svtbl(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64))) +svuint64_t svtbl(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16))) +svuint16_t svtbl(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8))) +svint8_t svtbl(svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64))) +svfloat64_t svtbl(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32))) +svfloat32_t svtbl(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16))) +svfloat16_t svtbl(svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32))) +svint32_t svtbl(svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64))) +svint64_t svtbl(svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16))) +svint16_t svtbl(svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8))) +svuint8_t svtrn1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32))) +svuint32_t svtrn1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64))) +svuint64_t svtrn1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16))) 
+svuint16_t svtrn1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8))) +svint8_t svtrn1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64))) +svfloat64_t svtrn1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32))) +svfloat32_t svtrn1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16))) +svfloat16_t svtrn1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32))) +svint32_t svtrn1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64))) +svint64_t svtrn1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16))) +svint16_t svtrn1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8))) +svuint8_t svtrn2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32))) +svuint32_t svtrn2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64))) +svuint64_t svtrn2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16))) +svuint16_t svtrn2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8))) +svint8_t svtrn2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64))) +svfloat64_t svtrn2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32))) +svfloat32_t svtrn2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16))) +svfloat16_t svtrn2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32))) +svint32_t svtrn2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64))) +svint64_t svtrn2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16))) +svint16_t svtrn2(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b))) +svbool_t svunpkhi(svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32))) +svint32_t svunpkhi(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64))) +svint64_t svunpkhi(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16))) +svint16_t svunpkhi(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32))) +svuint32_t svunpkhi(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64))) +svuint64_t svunpkhi(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16))) +svuint16_t svunpkhi(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b))) +svbool_t svunpklo(svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32))) +svint32_t svunpklo(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64))) +svint64_t svunpklo(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16))) +svint16_t svunpklo(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32))) +svuint32_t svunpklo(svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64))) +svuint64_t svunpklo(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16))) +svuint16_t svunpklo(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8))) +svuint8_t svuzp1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32))) +svuint32_t svuzp1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64))) +svuint64_t svuzp1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16))) +svuint16_t svuzp1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8))) +svint8_t svuzp1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64))) +svfloat64_t svuzp1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32))) +svfloat32_t svuzp1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16))) +svfloat16_t svuzp1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32))) +svint32_t svuzp1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64))) +svint64_t svuzp1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16))) +svint16_t svuzp1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8))) +svuint8_t svuzp2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32))) +svuint32_t svuzp2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64))) +svuint64_t svuzp2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16))) +svuint16_t svuzp2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8))) +svint8_t svuzp2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64))) +svfloat64_t svuzp2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32))) +svfloat32_t svuzp2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16))) +svfloat16_t svuzp2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32))) +svint32_t svuzp2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64))) +svint64_t svuzp2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16))) +svint16_t svuzp2(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32))) +svbool_t svwhilele_b8(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32))) +svbool_t svwhilele_b32(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32))) +svbool_t svwhilele_b64(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32))) +svbool_t svwhilele_b16(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64))) +svbool_t svwhilele_b8(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64))) +svbool_t 
svwhilele_b32(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64))) +svbool_t svwhilele_b64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64))) +svbool_t svwhilele_b16(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32))) +svbool_t svwhilele_b8(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32))) +svbool_t svwhilele_b32(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32))) +svbool_t svwhilele_b64(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32))) +svbool_t svwhilele_b16(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64))) +svbool_t svwhilele_b8(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64))) +svbool_t svwhilele_b32(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64))) +svbool_t svwhilele_b64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64))) +svbool_t svwhilele_b16(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32))) +svbool_t svwhilelt_b8(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32))) +svbool_t svwhilelt_b32(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32))) +svbool_t svwhilelt_b64(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32))) +svbool_t svwhilelt_b16(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64))) +svbool_t svwhilelt_b8(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64))) +svbool_t svwhilelt_b32(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64))) +svbool_t svwhilelt_b64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64))) +svbool_t svwhilelt_b16(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32))) +svbool_t svwhilelt_b8(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32))) +svbool_t svwhilelt_b32(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32))) +svbool_t svwhilelt_b64(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32))) +svbool_t svwhilelt_b16(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64))) +svbool_t svwhilelt_b8(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64))) +svbool_t svwhilelt_b32(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64))) +svbool_t svwhilelt_b64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64))) +svbool_t svwhilelt_b16(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8))) +svuint8_t svzip1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32))) +svuint32_t svzip1(svuint32_t, 
svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64))) +svuint64_t svzip1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16))) +svuint16_t svzip1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8))) +svint8_t svzip1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64))) +svfloat64_t svzip1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32))) +svfloat32_t svzip1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16))) +svfloat16_t svzip1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32))) +svint32_t svzip1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64))) +svint64_t svzip1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16))) +svint16_t svzip1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8))) +svuint8_t svzip2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32))) +svuint32_t svzip2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64))) +svuint64_t svzip2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16))) +svuint16_t svzip2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8))) +svint8_t svzip2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64))) +svfloat64_t svzip2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32))) +svfloat32_t svzip2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16))) +svfloat16_t svzip2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32))) +svint32_t svzip2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64))) +svint64_t svzip2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16))) +svint16_t svzip2(svint16_t, svint16_t); +#define svcvtnt_bf16_x svcvtnt_bf16_m +#define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m +#define svcvtnt_f16_x svcvtnt_f16_m +#define svcvtnt_f16_f32_x svcvtnt_f16_f32_m +#define svcvtnt_f32_x svcvtnt_f32_m +#define svcvtnt_f32_f64_x svcvtnt_f32_f64_m + +#define svcvtxnt_f32_x svcvtxnt_f32_m +#define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m + +#ifdef __cplusplus +} // extern "C" +#endif + +#undef __ai + +#undef __aio + +#endif /* __ARM_SVE_H */ diff --git a/third_party/aarch64/clang/arm_vector_types.h b/third_party/aarch64/clang/arm_vector_types.h new file mode 100644 index 000000000..8e79d39a6 --- /dev/null +++ b/third_party/aarch64/clang/arm_vector_types.h @@ -0,0 +1,345 @@ +/*===---- arm_vector_types - ARM vector type ------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined(__ARM_NEON_H) && !defined(__ARM_SVE_H) +#error "This file should not be used standalone. 
Please include arm_neon.h or arm_sve.h instead" + +#endif +#ifndef __ARM_NEON_TYPES_H +#define __ARM_NEON_TYPES_H +typedef float float32_t; +typedef __fp16 float16_t; +#if defined(__aarch64__) || defined(__arm64ec__) +typedef double float64_t; +#endif + +typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t; +typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t; +typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t; +typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t; +typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t; +typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t; +typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t; +typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t; +typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t; +typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t; +typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t; +typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t; +typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t; +typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t; +typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t; +typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t; +typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t; +typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t; +typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t; +typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t; +#if defined(__aarch64__) || defined(__arm64ec__) +typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t; +typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; +#endif + +typedef struct int8x8x2_t { + int8x8_t val[2]; +} int8x8x2_t; + +typedef struct int8x16x2_t { + int8x16_t val[2]; +} int8x16x2_t; + +typedef struct int16x4x2_t { + int16x4_t val[2]; +} int16x4x2_t; + +typedef struct int16x8x2_t { + int16x8_t val[2]; +} int16x8x2_t; + +typedef struct int32x2x2_t { + int32x2_t val[2]; +} int32x2x2_t; + +typedef struct int32x4x2_t { + int32x4_t val[2]; +} int32x4x2_t; + +typedef struct int64x1x2_t { + int64x1_t val[2]; +} int64x1x2_t; + +typedef struct int64x2x2_t { + int64x2_t val[2]; +} int64x2x2_t; + +typedef struct uint8x8x2_t { + uint8x8_t val[2]; +} uint8x8x2_t; + +typedef struct uint8x16x2_t { + uint8x16_t val[2]; +} uint8x16x2_t; + +typedef struct uint16x4x2_t { + uint16x4_t val[2]; +} uint16x4x2_t; + +typedef struct uint16x8x2_t { + uint16x8_t val[2]; +} uint16x8x2_t; + +typedef struct uint32x2x2_t { + uint32x2_t val[2]; +} uint32x2x2_t; + +typedef struct uint32x4x2_t { + uint32x4_t val[2]; +} uint32x4x2_t; + +typedef struct uint64x1x2_t { + uint64x1_t val[2]; +} uint64x1x2_t; + +typedef struct uint64x2x2_t { + uint64x2_t val[2]; +} uint64x2x2_t; + +typedef struct float16x4x2_t { + float16x4_t val[2]; +} float16x4x2_t; + +typedef struct float16x8x2_t { + float16x8_t val[2]; +} float16x8x2_t; + +typedef struct float32x2x2_t { + float32x2_t val[2]; +} float32x2x2_t; + +typedef struct float32x4x2_t { + float32x4_t val[2]; +} float32x4x2_t; + +#if defined(__aarch64__) || defined(__arm64ec__) +typedef struct float64x1x2_t { + float64x1_t val[2]; +} float64x1x2_t; + +typedef struct float64x2x2_t { + float64x2_t val[2]; +} float64x2x2_t; + +#endif +typedef struct int8x8x3_t { + int8x8_t val[3]; +} int8x8x3_t; + +typedef struct int8x16x3_t { + int8x16_t val[3]; +} int8x16x3_t; + +typedef struct int16x4x3_t { + 
int16x4_t val[3]; +} int16x4x3_t; + +typedef struct int16x8x3_t { + int16x8_t val[3]; +} int16x8x3_t; + +typedef struct int32x2x3_t { + int32x2_t val[3]; +} int32x2x3_t; + +typedef struct int32x4x3_t { + int32x4_t val[3]; +} int32x4x3_t; + +typedef struct int64x1x3_t { + int64x1_t val[3]; +} int64x1x3_t; + +typedef struct int64x2x3_t { + int64x2_t val[3]; +} int64x2x3_t; + +typedef struct uint8x8x3_t { + uint8x8_t val[3]; +} uint8x8x3_t; + +typedef struct uint8x16x3_t { + uint8x16_t val[3]; +} uint8x16x3_t; + +typedef struct uint16x4x3_t { + uint16x4_t val[3]; +} uint16x4x3_t; + +typedef struct uint16x8x3_t { + uint16x8_t val[3]; +} uint16x8x3_t; + +typedef struct uint32x2x3_t { + uint32x2_t val[3]; +} uint32x2x3_t; + +typedef struct uint32x4x3_t { + uint32x4_t val[3]; +} uint32x4x3_t; + +typedef struct uint64x1x3_t { + uint64x1_t val[3]; +} uint64x1x3_t; + +typedef struct uint64x2x3_t { + uint64x2_t val[3]; +} uint64x2x3_t; + +typedef struct float16x4x3_t { + float16x4_t val[3]; +} float16x4x3_t; + +typedef struct float16x8x3_t { + float16x8_t val[3]; +} float16x8x3_t; + +typedef struct float32x2x3_t { + float32x2_t val[3]; +} float32x2x3_t; + +typedef struct float32x4x3_t { + float32x4_t val[3]; +} float32x4x3_t; + +#if defined(__aarch64__) || defined(__arm64ec__) +typedef struct float64x1x3_t { + float64x1_t val[3]; +} float64x1x3_t; + +typedef struct float64x2x3_t { + float64x2_t val[3]; +} float64x2x3_t; + +#endif +typedef struct int8x8x4_t { + int8x8_t val[4]; +} int8x8x4_t; + +typedef struct int8x16x4_t { + int8x16_t val[4]; +} int8x16x4_t; + +typedef struct int16x4x4_t { + int16x4_t val[4]; +} int16x4x4_t; + +typedef struct int16x8x4_t { + int16x8_t val[4]; +} int16x8x4_t; + +typedef struct int32x2x4_t { + int32x2_t val[4]; +} int32x2x4_t; + +typedef struct int32x4x4_t { + int32x4_t val[4]; +} int32x4x4_t; + +typedef struct int64x1x4_t { + int64x1_t val[4]; +} int64x1x4_t; + +typedef struct int64x2x4_t { + int64x2_t val[4]; +} int64x2x4_t; + +typedef struct uint8x8x4_t { + uint8x8_t val[4]; +} uint8x8x4_t; + +typedef struct uint8x16x4_t { + uint8x16_t val[4]; +} uint8x16x4_t; + +typedef struct uint16x4x4_t { + uint16x4_t val[4]; +} uint16x4x4_t; + +typedef struct uint16x8x4_t { + uint16x8_t val[4]; +} uint16x8x4_t; + +typedef struct uint32x2x4_t { + uint32x2_t val[4]; +} uint32x2x4_t; + +typedef struct uint32x4x4_t { + uint32x4_t val[4]; +} uint32x4x4_t; + +typedef struct uint64x1x4_t { + uint64x1_t val[4]; +} uint64x1x4_t; + +typedef struct uint64x2x4_t { + uint64x2_t val[4]; +} uint64x2x4_t; + +typedef struct float16x4x4_t { + float16x4_t val[4]; +} float16x4x4_t; + +typedef struct float16x8x4_t { + float16x8_t val[4]; +} float16x8x4_t; + +typedef struct float32x2x4_t { + float32x2_t val[4]; +} float32x2x4_t; + +typedef struct float32x4x4_t { + float32x4_t val[4]; +} float32x4x4_t; + +#if defined(__aarch64__) || defined(__arm64ec__) +typedef struct float64x1x4_t { + float64x1_t val[4]; +} float64x1x4_t; + +typedef struct float64x2x4_t { + float64x2_t val[4]; +} float64x2x4_t; + +#endif +typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t; +typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t; + +typedef struct bfloat16x4x2_t { + bfloat16x4_t val[2]; +} bfloat16x4x2_t; + +typedef struct bfloat16x8x2_t { + bfloat16x8_t val[2]; +} bfloat16x8x2_t; + +typedef struct bfloat16x4x3_t { + bfloat16x4_t val[3]; +} bfloat16x4x3_t; + +typedef struct bfloat16x8x3_t { + bfloat16x8_t val[3]; +} bfloat16x8x3_t; + +typedef struct bfloat16x4x4_t { + bfloat16x4_t 
val[4]; +} bfloat16x4x4_t; + +typedef struct bfloat16x8x4_t { + bfloat16x8_t val[4]; +} bfloat16x8x4_t; + +#endif // __ARM_NEON_TYPES_H diff --git a/third_party/aarch64/clang/armintr.h b/third_party/aarch64/clang/armintr.h new file mode 100644 index 000000000..300ed4ee4 --- /dev/null +++ b/third_party/aarch64/clang/armintr.h @@ -0,0 +1,31 @@ +/*===---- armintr.h - ARM Windows intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +/* Only include this if we're compiling for the windows platform. */ +#ifndef _MSC_VER +#include_next +#else + +#ifndef __ARMINTR_H +#define __ARMINTR_H + +typedef enum +{ + _ARM_BARRIER_SY = 0xF, + _ARM_BARRIER_ST = 0xE, + _ARM_BARRIER_ISH = 0xB, + _ARM_BARRIER_ISHST = 0xA, + _ARM_BARRIER_NSH = 0x7, + _ARM_BARRIER_NSHST = 0x6, + _ARM_BARRIER_OSH = 0x3, + _ARM_BARRIER_OSHST = 0x2 +} _ARMINTR_BARRIER_TYPE; + +#endif /* __ARMINTR_H */ +#endif /* _MSC_VER */ diff --git a/third_party/awk/run.c b/third_party/awk/run.c index e0ab6208d..4d5b28aef 100644 --- a/third_party/awk/run.c +++ b/third_party/awk/run.c @@ -495,7 +495,7 @@ makearraystring(Node *p, const char *func) if (!adjbuf(&buf, &bufsz, tlen + 1, recsize, 0, func)) { FATAL("%s: out of memory %s[%s...]", - func, x->nval, buf); + func ? func : "NULL", x->nval, buf); } memcpy(buf + blen, s, slen); if (nsub) { diff --git a/third_party/double-conversion/BUILD.mk b/third_party/double-conversion/BUILD.mk index 10da7f072..847f02f5e 100644 --- a/third_party/double-conversion/BUILD.mk +++ b/third_party/double-conversion/BUILD.mk @@ -34,7 +34,8 @@ THIRD_PARTY_DOUBLECONVERSION_A_DIRECTDEPS = \ LIBC_MEM \ LIBC_STR \ LIBC_TINYMATH \ - THIRD_PARTY_LIBCXXABI + THIRD_PARTY_LIBCXXABI \ + THIRD_PARTY_LIBUNWIND THIRD_PARTY_DOUBLECONVERSION_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_DOUBLECONVERSION_A_DIRECTDEPS),$($(x)))) diff --git a/third_party/intel/BUILD.mk b/third_party/intel/BUILD.mk index fb82e1fbc..7c810ce96 100644 --- a/third_party/intel/BUILD.mk +++ b/third_party/intel/BUILD.mk @@ -3,4 +3,4 @@ PKGS += THIRD_PARTY_INTEL THIRD_PARTY_INTEL_HDRS = $(filter %.h,$(THIRD_PARTY_INTEL_FILES)) -THIRD_PARTY_INTEL_FILES := $(wildcard third_party/intel/*) +THIRD_PARTY_INTEL_FILES := $(wildcard third_party/intel/*) $(wildcard third_party/intel/clang/*) diff --git a/third_party/intel/clang/__wmmintrin_aes.h b/third_party/intel/clang/__wmmintrin_aes.h new file mode 100644 index 000000000..3010b3871 --- /dev/null +++ b/third_party/intel/clang/__wmmintrin_aes.h @@ -0,0 +1,140 @@ +/*===---- __wmmintrin_aes.h - AES intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __WMMINTRIN_H +#error "Never use <__wmmintrin_aes.h> directly; include instead." +#endif + +#ifndef __WMMINTRIN_AES_H +#define __WMMINTRIN_AES_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("aes"), __min_vector_width__(128))) + +/// Performs a single round of AES encryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESENC instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the encrypted value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesenc_si128(__m128i __V, __m128i __R) +{ + return (__m128i)__builtin_ia32_aesenc128((__v2di)__V, (__v2di)__R); +} + +/// Performs the final round of AES encryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESENCLAST instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the encrypted value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesenclast_si128(__m128i __V, __m128i __R) +{ + return (__m128i)__builtin_ia32_aesenclast128((__v2di)__V, (__v2di)__R); +} + +/// Performs a single round of AES decryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESDEC instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the decrypted value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesdec_si128(__m128i __V, __m128i __R) +{ + return (__m128i)__builtin_ia32_aesdec128((__v2di)__V, (__v2di)__R); +} + +/// Performs the final round of AES decryption using the Equivalent +/// Inverse Cipher, transforming the state value from the first source +/// operand using a 128-bit round key value contained in the second source +/// operand, and writes the result to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESDECLAST instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the state value. +/// \param __R +/// A 128-bit integer vector containing the round key value. +/// \returns A 128-bit integer vector containing the decrypted value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesdeclast_si128(__m128i __V, __m128i __R) +{ + return (__m128i)__builtin_ia32_aesdeclast128((__v2di)__V, (__v2di)__R); +} + +/// Applies the AES InvMixColumns() transformation to an expanded key +/// contained in the source operand, and writes the result to the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VAESIMC instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the expanded key. 
+/// \returns A 128-bit integer vector containing the transformed value. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_aesimc_si128(__m128i __V) +{ + return (__m128i)__builtin_ia32_aesimc128((__v2di)__V); +} + +/// Generates a round key for AES encryption, operating on 128-bit data +/// specified in the first source operand and using an 8-bit round constant +/// specified by the second source operand, and writes the result to the +/// destination. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_aeskeygenassist_si128(__m128i C, const int R); +/// \endcode +/// +/// This intrinsic corresponds to the AESKEYGENASSIST instruction. +/// +/// \param C +/// A 128-bit integer vector that is used to generate the AES encryption key. +/// \param R +/// An 8-bit round constant used to generate the AES encryption key. +/// \returns A 128-bit round key for AES encryption. +#define _mm_aeskeygenassist_si128(C, R) \ + ((__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R))) + +#undef __DEFAULT_FN_ATTRS + +#endif /* __WMMINTRIN_AES_H */ diff --git a/third_party/intel/clang/__wmmintrin_pclmul.h b/third_party/intel/clang/__wmmintrin_pclmul.h new file mode 100644 index 000000000..c9a6d50bd --- /dev/null +++ b/third_party/intel/clang/__wmmintrin_pclmul.h @@ -0,0 +1,48 @@ +/*===---- __wmmintrin_pclmul.h - PCMUL intrinsics ---------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __WMMINTRIN_H +#error "Never use <__wmmintrin_pclmul.h> directly; include instead." +#endif + +#ifndef __WMMINTRIN_PCLMUL_H +#define __WMMINTRIN_PCLMUL_H + +/// Multiplies two 64-bit integer values, which are selected from source +/// operands using the immediate-value operand. The multiplication is a +/// carry-less multiplication, and the 128-bit integer product is stored in +/// the destination. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_clmulepi64_si128(__m128i X, __m128i Y, const int I); +/// \endcode +/// +/// This intrinsic corresponds to the VPCLMULQDQ instruction. +/// +/// \param X +/// A 128-bit vector of [2 x i64] containing one of the source operands. +/// \param Y +/// A 128-bit vector of [2 x i64] containing one of the source operands. +/// \param I +/// An immediate value specifying which 64-bit values to select from the +/// operands. Bit 0 is used to select a value from operand \a X, and bit +/// 4 is used to select a value from operand \a Y: \n +/// Bit[0]=0 indicates that bits[63:0] of operand \a X are used. \n +/// Bit[0]=1 indicates that bits[127:64] of operand \a X are used. \n +/// Bit[4]=0 indicates that bits[63:0] of operand \a Y are used. \n +/// Bit[4]=1 indicates that bits[127:64] of operand \a Y are used. +/// \returns The 128-bit integer vector containing the result of the carry-less +/// multiplication of the selected 64-bit values. 
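/*
 * Illustrative usage (editor's sketch, not part of the vendored header):
 * the AES round intrinsics above compose into a block cipher in the obvious
 * way, and _mm_clmulepi64_si128 (whose definition follows just below)
 * performs the 64x64 -> 128-bit carry-less multiply used by GHASH/CRC
 * folding. The helper names and the pre-expanded key schedule rk[] are
 * assumptions of this sketch; building rk[] with _mm_aeskeygenassist_si128
 * is omitted. Needs -maes (and -mpclmul for the second helper).
 */
#include <wmmintrin.h>
#include <emmintrin.h>

static inline __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11]) {
  block = _mm_xor_si128(block, rk[0]);           /* initial AddRoundKey        */
  for (int i = 1; i < 10; ++i)
    block = _mm_aesenc_si128(block, rk[i]);      /* rounds 1..9                */
  return _mm_aesenclast_si128(block, rk[10]);    /* final round, no MixColumns */
}

static inline __m128i clmul_lo(__m128i a, __m128i b) {
  return _mm_clmulepi64_si128(a, b, 0x00);       /* low qword of a, low qword of b */
}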
+#define _mm_clmulepi64_si128(X, Y, I) \ + ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(X), \ + (__v2di)(__m128i)(Y), (char)(I))) + +#endif /* __WMMINTRIN_PCLMUL_H */ diff --git a/third_party/intel/clang/adcintrin.h b/third_party/intel/clang/adcintrin.h new file mode 100644 index 000000000..0065a1b54 --- /dev/null +++ b/third_party/intel/clang/adcintrin.h @@ -0,0 +1,160 @@ +/*===---- adcintrin.h - ADC intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ADCINTRIN_H +#define __ADCINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) + +/* Use C++ inline semantics in C++, GNU inline for C mode. */ +#if defined(__cplusplus) +#define __INLINE __inline +#else +#define __INLINE static __inline +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated +/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory +/// at \a __p, and returns the 8-bit carry-out (carry flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store32(__p, __x + __y + temp) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADC instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// A 32-bit unsigned addend. +/// \param __y +/// A 32-bit unsigned addend. +/// \param __p +/// Pointer to memory for storing the sum. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarry_u32(unsigned char __cf, + unsigned int __x, + unsigned int __y, + unsigned int *__p) { + return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p); +} + +/// Adds unsigned 32-bit integer \a __y to 0 or 1 as indicated by the carry +/// flag \a __cf, and subtracts the result from unsigned 32-bit integer +/// \a __x. Stores the unsigned 32-bit difference in the memory at \a __p, +/// and returns the 8-bit carry-out (carry or overflow flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store32(__p, __x - (__y + temp)) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SBB instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// The 32-bit unsigned minuend. +/// \param __y +/// The 32-bit unsigned subtrahend. +/// \param __p +/// Pointer to memory for storing the difference. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS _subborrow_u32(unsigned char __cf, + unsigned int __x, + unsigned int __y, + unsigned int *__p) { + return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p); +} + +#ifdef __x86_64__ +/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated +/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory +/// at \a __p, and returns the 8-bit carry-out (carry flag). 
+/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store64(__p, __x + __y + temp) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADC instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// A 64-bit unsigned addend. +/// \param __y +/// A 64-bit unsigned addend. +/// \param __p +/// Pointer to memory for storing the sum. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS +_addcarry_u64(unsigned char __cf, unsigned long long __x, + unsigned long long __y, unsigned long long *__p) { + return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p); +} + +/// Adds unsigned 64-bit integer \a __y to 0 or 1 as indicated by the carry +/// flag \a __cf, and subtracts the result from unsigned 64-bit integer +/// \a __x. Stores the unsigned 64-bit difference in the memory at \a __p, +/// and returns the 8-bit carry-out (carry or overflow flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store64(__p, __x - (__y + temp)) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADC instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// The 64-bit unsigned minuend. +/// \param __y +/// The 64-bit unsigned subtrahend. +/// \param __p +/// Pointer to memory for storing the difference. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS +_subborrow_u64(unsigned char __cf, unsigned long long __x, + unsigned long long __y, unsigned long long *__p) { + return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p); +} +#endif + +#if defined(__cplusplus) +} +#endif + +#undef __INLINE +#undef __DEFAULT_FN_ATTRS + +#endif /* __ADCINTRIN_H */ diff --git a/third_party/intel/clang/adxintrin.h b/third_party/intel/clang/adxintrin.h new file mode 100644 index 000000000..bc6a4caf3 --- /dev/null +++ b/third_party/intel/clang/adxintrin.h @@ -0,0 +1,102 @@ +/*===---- adxintrin.h - ADX intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __ADXINTRIN_H +#define __ADXINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("adx"))) + +/* Use C++ inline semantics in C++, GNU inline for C mode. */ +#if defined(__cplusplus) +#define __INLINE __inline +#else +#define __INLINE static __inline +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Intrinsics that are available only if __ADX__ is defined. */ + +/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated +/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory +/// at \a __p, and returns the 8-bit carry-out (carry flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store32(__p, __x + __y + temp) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADCX instruction. 
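/*
 * Illustrative usage (editor's sketch, not part of the vendored header):
 * the carry flag returned by _addcarry_u64 / _addcarryx_u64 is meant to be
 * threaded through a chain of limb additions, which is how multi-precision
 * adds are written with these intrinsics. The helper name add256 and the
 * four-limb little-endian layout are assumptions of this example.
 */
#include <immintrin.h>

#ifdef __x86_64__
static inline unsigned char add256(const unsigned long long a[4],
                                   const unsigned long long b[4],
                                   unsigned long long out[4]) {
  unsigned char c = 0;                        /* no carry into the lowest limb */
  c = _addcarry_u64(c, a[0], b[0], &out[0]);
  c = _addcarry_u64(c, a[1], b[1], &out[1]);
  c = _addcarry_u64(c, a[2], b[2], &out[2]);
  c = _addcarry_u64(c, a[3], b[3], &out[3]);
  return c;                                   /* carry out of the top limb */
}
#endif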
+/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// A 32-bit unsigned addend. +/// \param __y +/// A 32-bit unsigned addend. +/// \param __p +/// Pointer to memory for storing the sum. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarryx_u32(unsigned char __cf, + unsigned int __x, + unsigned int __y, + unsigned int *__p) { + return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p); +} + +#ifdef __x86_64__ +/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated +/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory +/// at \a __p, and returns the 8-bit carry-out (carry flag). +/// +/// \code{.operation} +/// temp := (__cf == 0) ? 0 : 1 +/// Store64(__p, __x + __y + temp) +/// result := CF +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ADCX instruction. +/// +/// \param __cf +/// The 8-bit unsigned carry flag; any non-zero value indicates carry. +/// \param __x +/// A 64-bit unsigned addend. +/// \param __y +/// A 64-bit unsigned addend. +/// \param __p +/// Pointer to memory for storing the sum. +/// \returns The 8-bit unsigned carry-out value. +__INLINE unsigned char __DEFAULT_FN_ATTRS +_addcarryx_u64(unsigned char __cf, unsigned long long __x, + unsigned long long __y, unsigned long long *__p) { + return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p); +} +#endif + +#if defined(__cplusplus) +} +#endif + +#undef __INLINE +#undef __DEFAULT_FN_ATTRS + +#endif /* __ADXINTRIN_H */ diff --git a/third_party/intel/clang/ammintrin.h b/third_party/intel/clang/ammintrin.h new file mode 100644 index 000000000..edf08e8c5 --- /dev/null +++ b/third_party/intel/clang/ammintrin.h @@ -0,0 +1,183 @@ +/*===---- ammintrin.h - SSE4a intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __AMMINTRIN_H +#define __AMMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +#include "pmmintrin.h" + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a"), __min_vector_width__(128))) + +/// Extracts the specified bits from the lower 64 bits of the 128-bit +/// integer vector operand at the index \a idx and of the length \a len. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx); +/// \endcode +/// +/// This intrinsic corresponds to the EXTRQ instruction. +/// +/// \param x +/// The value from which bits are extracted. +/// \param len +/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0] +/// are zero, the length is interpreted as 64. +/// \param idx +/// Bits [5:0] specify the index of the least significant bit; the other +/// bits are ignored. If the sum of the index and length is greater than 64, +/// the result is undefined. If the length and index are both zero, bits +/// [63:0] of parameter \a x are extracted. If the length is zero but the +/// index is non-zero, the result is undefined. 
+/// \returns A 128-bit integer vector whose lower 64 bits contain the bits +/// extracted from the source operand. +#define _mm_extracti_si64(x, len, idx) \ + ((__m128i)__builtin_ia32_extrqi((__v2di)(__m128i)(x), \ + (char)(len), (char)(idx))) + +/// Extracts the specified bits from the lower 64 bits of the 128-bit +/// integer vector operand at the index and of the length specified by +/// \a __y. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the EXTRQ instruction. +/// +/// \param __x +/// The value from which bits are extracted. +/// \param __y +/// Specifies the index of the least significant bit at [13:8] and the +/// length at [5:0]; all other bits are ignored. If bits [5:0] are zero, the +/// length is interpreted as 64. If the sum of the index and length is +/// greater than 64, the result is undefined. If the length and index are +/// both zero, bits [63:0] of parameter \a __x are extracted. If the length +/// is zero but the index is non-zero, the result is undefined. +/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted +/// from the source operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_extract_si64(__m128i __x, __m128i __y) +{ + return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y); +} + +/// Inserts bits of a specified length from the source integer vector +/// \a y into the lower 64 bits of the destination integer vector \a x at +/// the index \a idx and of the length \a len. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_inserti_si64(__m128i x, __m128i y, const int len, +/// const int idx); +/// \endcode +/// +/// This intrinsic corresponds to the INSERTQ instruction. +/// +/// \param x +/// The destination operand where bits will be inserted. The inserted bits +/// are defined by the length \a len and by the index \a idx specifying the +/// least significant bit. +/// \param y +/// The source operand containing the bits to be extracted. The extracted +/// bits are the least significant bits of operand \a y of length \a len. +/// \param len +/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0] +/// are zero, the length is interpreted as 64. +/// \param idx +/// Bits [5:0] specify the index of the least significant bit; the other +/// bits are ignored. If the sum of the index and length is greater than 64, +/// the result is undefined. If the length and index are both zero, bits +/// [63:0] of parameter \a y are inserted into parameter \a x. If the length +/// is zero but the index is non-zero, the result is undefined. +/// \returns A 128-bit integer vector containing the original lower 64-bits of +/// destination operand \a x with the specified bitfields replaced by the +/// lower bits of source operand \a y. The upper 64 bits of the return value +/// are undefined. +#define _mm_inserti_si64(x, y, len, idx) \ + ((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \ + (__v2di)(__m128i)(y), \ + (char)(len), (char)(idx))) + +/// Inserts bits of a specified length from the source integer vector +/// \a __y into the lower 64 bits of the destination integer vector \a __x +/// at the index and of the length specified by \a __y. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the INSERTQ instruction. +/// +/// \param __x +/// The destination operand where bits will be inserted. The inserted bits +/// are defined by the length and by the index of the least significant bit +/// specified by operand \a __y. 
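/*
 * Illustrative usage (editor's sketch, not part of the vendored header):
 * EXTRQ/INSERTQ treat the low 64 bits of the operands as a bit field
 * addressed by a (length, index) pair. The constants below pull bits
 * [31:20] out of one value and splice them into another at the same
 * position; they are example values only. Requires an AMD target built
 * with -msse4a.
 */
#include <ammintrin.h>

static inline __m128i swap_field_31_20(__m128i dst, __m128i src) {
  __m128i field = _mm_extracti_si64(src, 12, 20);   /* len=12, idx=20         */
  return _mm_inserti_si64(dst, field, 12, 20);      /* write it back into dst */
}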
+/// \param __y +/// The source operand containing the bits to be extracted. The extracted +/// bits are the least significant bits of operand \a __y with length +/// specified by bits [69:64]. These are inserted into the destination at the +/// index specified by bits [77:72]; all other bits are ignored. If bits +/// [69:64] are zero, the length is interpreted as 64. If the sum of the +/// index and length is greater than 64, the result is undefined. If the +/// length and index are both zero, bits [63:0] of parameter \a __y are +/// inserted into parameter \a __x. If the length is zero but the index is +/// non-zero, the result is undefined. +/// \returns A 128-bit integer vector containing the original lower 64-bits of +/// destination operand \a __x with the specified bitfields replaced by the +/// lower bits of source operand \a __y. The upper 64 bits of the return +/// value are undefined. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_insert_si64(__m128i __x, __m128i __y) +{ + return (__m128i)__builtin_ia32_insertq((__v2di)__x, (__v2di)__y); +} + +/// Stores a 64-bit double-precision value in a 64-bit memory location. +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTSD instruction. +/// +/// \param __p +/// The 64-bit memory location used to store the register value. +/// \param __a +/// The 64-bit double-precision floating-point register value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_stream_sd(void *__p, __m128d __a) +{ + __builtin_ia32_movntsd((double *)__p, (__v2df)__a); +} + +/// Stores a 32-bit single-precision floating-point value in a 32-bit +/// memory location. To minimize caching, the data is flagged as +/// non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTSS instruction. +/// +/// \param __p +/// The 32-bit memory location used to store the register value. +/// \param __a +/// The 32-bit single-precision floating-point register value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_stream_ss(void *__p, __m128 __a) +{ + __builtin_ia32_movntss((float *)__p, (__v4sf)__a); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __AMMINTRIN_H */ diff --git a/third_party/intel/clang/amxcomplexintrin.h b/third_party/intel/clang/amxcomplexintrin.h new file mode 100644 index 000000000..84ef972fc --- /dev/null +++ b/third_party/intel/clang/amxcomplexintrin.h @@ -0,0 +1,169 @@ +/*===--------- amxcomplexintrin.h - AMXCOMPLEX intrinsics -*- C++ -*---------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===------------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifndef __AMX_COMPLEXINTRIN_H +#define __AMX_COMPLEXINTRIN_H +#ifdef __x86_64__ + +#define __DEFAULT_FN_ATTRS_COMPLEX \ + __attribute__((__always_inline__, __nodebug__, __target__("amx-complex"))) + +/// Perform matrix multiplication of two tiles containing complex elements and +/// accumulate the results into a packed single precision tile. Each dword +/// element in input tiles \a a and \a b is interpreted as a complex number +/// with FP16 real part and FP16 imaginary part. 
+/// Calculates the imaginary part of the result. For each possible combination +/// of (row of \a a, column of \a b), it performs a set of multiplication +/// and accumulations on all corresponding complex numbers (one from \a a +/// and one from \a b). The imaginary part of the \a a element is multiplied +/// with the real part of the corresponding \a b element, and the real part +/// of the \a a element is multiplied with the imaginary part of the +/// corresponding \a b elements. The two accumulated results are added, and +/// then accumulated into the corresponding row and column of \a dst. +/// +/// \headerfile +/// +/// \code +/// void _tile_cmmimfp16ps(__tile dst, __tile a, __tile b); +/// \endcode +/// +/// \code{.operation} +/// FOR m := 0 TO dst.rows - 1 +/// tmp := dst.row[m] +/// FOR k := 0 TO (a.colsb / 4) - 1 +/// FOR n := 0 TO (dst.colsb / 4) - 1 +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+1]) +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+0]) +/// ENDFOR +/// ENDFOR +/// write_row_and_zero(dst, m, tmp, dst.colsb) +/// ENDFOR +/// zero_upper_rows(dst, dst.rows) +/// zero_tileconfig_start() +/// \endcode +/// +/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param a +/// The 1st source tile. Max size is 1024 Bytes. +/// \param b +/// The 2nd source tile. Max size is 1024 Bytes. +#define _tile_cmmimfp16ps(dst, a, b) __builtin_ia32_tcmmimfp16ps(dst, a, b) + +/// Perform matrix multiplication of two tiles containing complex elements and +/// accumulate the results into a packed single precision tile. Each dword +/// element in input tiles \a a and \a b is interpreted as a complex number +/// with FP16 real part and FP16 imaginary part. +/// Calculates the real part of the result. For each possible combination +/// of (row of \a a, column of \a b), it performs a set of multiplication +/// and accumulations on all corresponding complex numbers (one from \a a +/// and one from \a b). The real part of the \a a element is multiplied +/// with the real part of the corresponding \a b element, and the negated +/// imaginary part of the \a a element is multiplied with the imaginary +/// part of the corresponding \a b elements. The two accumulated results +/// are added, and then accumulated into the corresponding row and column +/// of \a dst. +/// +/// \headerfile +/// +/// \code +/// void _tile_cmmrlfp16ps(__tile dst, __tile a, __tile b); +/// \endcode +/// +/// \code{.operation} +/// FOR m := 0 TO dst.rows - 1 +/// tmp := dst.row[m] +/// FOR k := 0 TO (a.colsb / 4) - 1 +/// FOR n := 0 TO (dst.colsb / 4) - 1 +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0]) +/// tmp.fp32[n] += FP32(-a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1]) +/// ENDFOR +/// ENDFOR +/// write_row_and_zero(dst, m, tmp, dst.colsb) +/// ENDFOR +/// zero_upper_rows(dst, dst.rows) +/// zero_tileconfig_start() +/// \endcode +/// +/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param a +/// The 1st source tile. Max size is 1024 Bytes. +/// \param b +/// The 2nd source tile. Max size is 1024 Bytes. 
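/*
 * Illustrative usage (editor's sketch, not part of the vendored header):
 * a complex FP16 tile product is accumulated with one call per output
 * component, typically into two separate accumulator tiles. The tile
 * numbers are example choices, _tile_cmmrlfp16ps is defined just below,
 * and the tiles are assumed to have been configured and loaded already
 * (see the fuller AMX example later in this series of headers). Requires
 * -mamx-complex.
 */
#include <immintrin.h>

static inline void tile_cmatmul_accumulate(void) {
  _tile_cmmrlfp16ps(0, 2, 3);   /* tmm0 += real part of tmm2 * tmm3      */
  _tile_cmmimfp16ps(1, 2, 3);   /* tmm1 += imaginary part of tmm2 * tmm3 */
}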
+#define _tile_cmmrlfp16ps(dst, a, b) __builtin_ia32_tcmmrlfp16ps(dst, a, b) + +static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX +_tile_cmmimfp16ps_internal(unsigned short m, unsigned short n, unsigned short k, + _tile1024i dst, _tile1024i src1, _tile1024i src2) { + return __builtin_ia32_tcmmimfp16ps_internal(m, n, k, dst, src1, src2); +} + +static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX +_tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k, + _tile1024i dst, _tile1024i src1, _tile1024i src2) { + return __builtin_ia32_tcmmrlfp16ps_internal(m, n, k, dst, src1, src2); +} + +/// Perform matrix multiplication of two tiles containing complex elements and +/// accumulate the results into a packed single precision tile. Each dword +/// element in input tiles src0 and src1 is interpreted as a complex number with +/// FP16 real part and FP16 imaginary part. +/// This function calculates the imaginary part of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TCMMIMFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_COMPLEX +static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col, + dst->tile, src0.tile, src1.tile); +} + +/// Perform matrix multiplication of two tiles containing complex elements and +/// accumulate the results into a packed single precision tile. Each dword +/// element in input tiles src0 and src1 is interpreted as a complex number with +/// FP16 real part and FP16 imaginary part. +/// This function calculates the real part of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TCMMRLFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_COMPLEX +static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col, + dst->tile, src0.tile, src1.tile); +} + +#endif // __x86_64__ +#endif // __AMX_COMPLEXINTRIN_H diff --git a/third_party/intel/clang/amxfp16intrin.h b/third_party/intel/clang/amxfp16intrin.h new file mode 100644 index 000000000..ed798245d --- /dev/null +++ b/third_party/intel/clang/amxfp16intrin.h @@ -0,0 +1,58 @@ +/*===------------- amxfp16intrin.h - AMX_FP16 intrinsics -*- C++ -*---------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===------------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; use instead." +#endif /* __IMMINTRIN_H */ + +#ifndef __AMX_FP16INTRIN_H +#define __AMX_FP16INTRIN_H +#ifdef __x86_64__ + +/// Compute dot-product of FP16 (16-bit) floating-point pairs in tiles \a a +/// and \a b, accumulating the intermediate single-precision (32-bit) +/// floating-point elements with elements in \a dst, and store the 32-bit +/// result back to tile \a dst. 
+/// +/// \headerfile +/// +/// \code +/// void _tile_dpfp16ps (__tile dst, __tile a, __tile b) +/// \endcode +/// +/// \code{.operation} +/// FOR m := 0 TO dst.rows - 1 +/// tmp := dst.row[m] +/// FOR k := 0 TO (a.colsb / 4) - 1 +/// FOR n := 0 TO (dst.colsb / 4) - 1 +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * +/// FP32(b.row[k].fp16[2*n+0]) +/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * +/// FP32(b.row[k].fp16[2*n+1]) +/// ENDFOR +/// ENDFOR +/// write_row_and_zero(dst, m, tmp, dst.colsb) +/// ENDFOR +/// zero_upper_rows(dst, dst.rows) +/// zero_tileconfig_start() +/// \endcode +/// +/// This intrinsic corresponds to the \c TDPFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param a +/// The 1st source tile. Max size is 1024 Bytes. +/// \param b +/// The 2nd source tile. Max size is 1024 Bytes. +#define _tile_dpfp16ps(dst, a, b) \ + __builtin_ia32_tdpfp16ps(dst, a, b) + +#endif /* __x86_64__ */ +#endif /* __AMX_FP16INTRIN_H */ diff --git a/third_party/intel/clang/amxintrin.h b/third_party/intel/clang/amxintrin.h new file mode 100644 index 000000000..baa56f5b2 --- /dev/null +++ b/third_party/intel/clang/amxintrin.h @@ -0,0 +1,524 @@ +/*===--------------- amxintrin.h - AMX intrinsics -*- C/C++ -*---------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===------------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif /* __IMMINTRIN_H */ + +#ifndef __AMXINTRIN_H +#define __AMXINTRIN_H +#ifdef __x86_64__ + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS_TILE \ + __attribute__((__always_inline__, __nodebug__, __target__("amx-tile"))) +#define __DEFAULT_FN_ATTRS_INT8 \ + __attribute__((__always_inline__, __nodebug__, __target__("amx-int8"))) +#define __DEFAULT_FN_ATTRS_BF16 \ + __attribute__((__always_inline__, __nodebug__, __target__("amx-bf16"))) +#define __DEFAULT_FN_ATTRS_FP16 \ + __attribute__((__always_inline__, __nodebug__, __target__("amx-fp16"))) + +/// Load tile configuration from a 64-byte memory location specified by +/// "mem_addr". The tile configuration includes the tile type palette, the +/// number of bytes per row, and the number of rows. If the specified +/// palette_id is zero, that signifies the init state for both the tile +/// config and the tile data, and the tiles are zeroed. Any invalid +/// configurations will result in #GP fault. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LDTILECFG instruction. +/// +/// \param __config +/// A pointer to 512-bits configuration +static __inline__ void __DEFAULT_FN_ATTRS_TILE +_tile_loadconfig(const void *__config) { + __builtin_ia32_tile_loadconfig(__config); +} + +/// Stores the current tile configuration to a 64-byte memory location +/// specified by "mem_addr". The tile configuration includes the tile type +/// palette, the number of bytes per row, and the number of rows. If tiles +/// are not configured, all zeroes will be stored to memory. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the STTILECFG instruction. 
+/// +/// \param __config +/// A pointer to 512-bits configuration +static __inline__ void __DEFAULT_FN_ATTRS_TILE +_tile_storeconfig(void *__config) { + __builtin_ia32_tile_storeconfig(__config); +} + +/// Release the tile configuration to return to the init state, which +/// releases all storage it currently holds. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TILERELEASE instruction. +static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) { + __builtin_ia32_tilerelease(); +} + +/// Load tile rows from memory specifieid by "base" address and "stride" into +/// destination tile "dst" using the tile configuration previously configured +/// via "_tile_loadconfig". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TILELOADD instruction. +/// +/// \param dst +/// A destination tile. Max size is 1024 Bytes. +/// \param base +/// A pointer to base address. +/// \param stride +/// The stride between the rows' data to be loaded in memory. +#define _tile_loadd(dst, base, stride) \ + __builtin_ia32_tileloadd64((dst), ((const void *)(base)), \ + (__SIZE_TYPE__)(stride)) + +/// Load tile rows from memory specifieid by "base" address and "stride" into +/// destination tile "dst" using the tile configuration previously configured +/// via "_tile_loadconfig". This intrinsic provides a hint to the implementation +/// that the data will likely not be reused in the near future and the data +/// caching can be optimized accordingly. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TILELOADDT1 instruction. +/// +/// \param dst +/// A destination tile. Max size is 1024 Bytes. +/// \param base +/// A pointer to base address. +/// \param stride +/// The stride between the rows' data to be loaded in memory. +#define _tile_stream_loadd(dst, base, stride) \ + __builtin_ia32_tileloaddt164((dst), ((const void *)(base)), \ + (__SIZE_TYPE__)(stride)) + +/// Store the tile specified by "src" to memory specifieid by "base" address and +/// "stride" using the tile configuration previously configured via +/// "_tile_loadconfig". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TILESTORED instruction. +/// +/// \param dst +/// A destination tile. Max size is 1024 Bytes. +/// \param base +/// A pointer to base address. +/// \param stride +/// The stride between the rows' data to be stored in memory. +#define _tile_stored(dst, base, stride) \ + __builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride)) + +/// Zero the tile specified by "tdest". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TILEZERO instruction. +/// +/// \param tile +/// The destination tile to be zero. Max size is 1024 Bytes. +#define _tile_zero(tile) __builtin_ia32_tilezero((tile)) + +/// Compute dot-product of bytes in tiles with a source/destination accumulator. +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with +/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit +/// results. Sum these 4 results with the corresponding 32-bit integer in "dst", +/// and store the 32-bit result back to tile "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBSSD instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. 
+#define _tile_dpbssd(dst, src0, src1) \ + __builtin_ia32_tdpbssd((dst), (src0), (src1)) + +/// Compute dot-product of bytes in tiles with a source/destination accumulator. +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with +/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate +/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in "dst", and store the 32-bit result back to tile "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBSUD instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +#define _tile_dpbsud(dst, src0, src1) \ + __builtin_ia32_tdpbsud((dst), (src0), (src1)) + +/// Compute dot-product of bytes in tiles with a source/destination accumulator. +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with +/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit +/// results. Sum these 4 results with the corresponding 32-bit integer in "dst", +/// and store the 32-bit result back to tile "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBUSD instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +#define _tile_dpbusd(dst, src0, src1) \ + __builtin_ia32_tdpbusd((dst), (src0), (src1)) + +/// Compute dot-product of bytes in tiles with a source/destination accumulator. +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with +/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate +/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in +/// "dst", and store the 32-bit result back to tile "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBUUD instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +#define _tile_dpbuud(dst, src0, src1) \ + __builtin_ia32_tdpbuud((dst), (src0), (src1)) + +/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and +/// src1, accumulating the intermediate single-precision (32-bit) floating-point +/// elements with elements in "dst", and store the 32-bit result back to tile +/// "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBF16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +#define _tile_dpbf16ps(dst, src0, src1) \ + __builtin_ia32_tdpbf16ps((dst), (src0), (src1)) + +/// AMX tile register size can be configured, the maximum size is 16x64=1024 +/// bytes. Since there is no 2D type in llvm IR, we use vector type to +/// represent 2D tile and the fixed size is maximum amx tile register size. +typedef int _tile1024i __attribute__((__vector_size__(1024), __aligned__(64))); + +/// This is internal intrinsic. C/C++ user should avoid calling it directly. 
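/*
 * Illustrative usage (editor's sketch, not part of the vendored header):
 * an end-to-end int8 tile multiply using the macro-form intrinsics defined
 * above. The 64-byte configuration layout (palette byte, then per-tile
 * colsb/rows arrays) follows Intel's palette-1 documentation and is an
 * assumption of this sketch, as is the requirement that B already be in
 * the 4-byte-interleaved (VNNI) layout. Requires -mamx-tile -mamx-int8,
 * and on Linux the OS must have granted AMX tile state (arch_prctl)
 * before tiles may be used.
 */
#include <immintrin.h>
#include <stdint.h>
#include <string.h>

struct amx_tilecfg {
  uint8_t  palette_id;     /* byte 0: palette 1 selects the standard layout */
  uint8_t  start_row;      /* byte 1 */
  uint8_t  reserved[14];
  uint16_t colsb[16];      /* bytes 16..47: bytes per row for tmm0..tmm15   */
  uint8_t  rows[16];       /* bytes 48..63: row count for tmm0..tmm15       */
};

/* C(16x16, int32) += A(16x64, int8) * B(16x64, int8 pre-packed for VNNI). */
static void dpbssd_16x16x64(int32_t C[16][16],
                            const int8_t A[16][64],
                            const int8_t B[16][64]) {
  struct amx_tilecfg cfg;
  memset(&cfg, 0, sizeof(cfg));
  cfg.palette_id = 1;
  cfg.rows[0] = 16; cfg.colsb[0] = 64;   /* tmm0: accumulator            */
  cfg.rows[1] = 16; cfg.colsb[1] = 64;   /* tmm1: A                      */
  cfg.rows[2] = 16; cfg.colsb[2] = 64;   /* tmm2: B                      */
  _tile_loadconfig(&cfg);
  _tile_loadd(0, C, 64);                 /* stride = 16 int32 = 64 bytes */
  _tile_loadd(1, A, 64);
  _tile_loadd(2, B, 64);
  _tile_dpbssd(0, 1, 2);
  _tile_stored(0, C, 64);
  _tile_release();
}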
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_loadd_internal(unsigned short m, unsigned short n, const void *base,
+                     __SIZE_TYPE__ stride) {
+  return __builtin_ia32_tileloadd64_internal(m, n, base,
+                                             (__SIZE_TYPE__)(stride));
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_loaddt1_internal(unsigned short m, unsigned short n, const void *base,
+                       __SIZE_TYPE__ stride) {
+  return __builtin_ia32_tileloaddt164_internal(m, n, base,
+                                               (__SIZE_TYPE__)(stride));
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbssd_internal(unsigned short m, unsigned short n, unsigned short k,
+                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbssd_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbsud_internal(unsigned short m, unsigned short n, unsigned short k,
+                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbsud_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbusd_internal(unsigned short m, unsigned short n, unsigned short k,
+                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbusd_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbuud_internal(unsigned short m, unsigned short n, unsigned short k,
+                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbuud_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ void __DEFAULT_FN_ATTRS_INT8
+_tile_stored_internal(unsigned short m, unsigned short n, void *base,
+                      __SIZE_TYPE__ stride, _tile1024i tile) {
+  return __builtin_ia32_tilestored64_internal(m, n, base,
+                                              (__SIZE_TYPE__)(stride), tile);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_BF16
+_tile_dpbf16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+                        _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpbf16ps_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_FP16
+_tile_dpfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+                        _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+  return __builtin_ia32_tdpfp16ps_internal(m, n, k, dst, src1, src2);
+}
+
+/// This struct packs the shape and tile data together for the user. We
+/// suggest initializing the struct as early as possible, because the compiler
+/// depends on the shape information to do the configuration. Constant values
+/// are preferred so the compiler can optimize.
+typedef struct __tile1024i_str {
+  const unsigned short row;
+  const unsigned short col;
+  _tile1024i tile;
+} __tile1024i;
+
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst".
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the TILELOADD instruction.
+/// +/// \param dst +/// A destination tile. Max size is 1024 Bytes. +/// \param base +/// A pointer to base address. +/// \param stride +/// The stride between the rows' data to be loaded in memory. +__DEFAULT_FN_ATTRS_TILE +static __inline__ void __tile_loadd(__tile1024i *dst, const void *base, + __SIZE_TYPE__ stride) { + dst->tile = _tile_loadd_internal(dst->row, dst->col, base, stride); +} + +/// Load tile rows from memory specifieid by "base" address and "stride" into +/// destination tile "dst". This intrinsic provides a hint to the implementation +/// that the data will likely not be reused in the near future and the data +/// caching can be optimized accordingly. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TILELOADDT1 instruction. +/// +/// \param dst +/// A destination tile. Max size is 1024 Bytes. +/// \param base +/// A pointer to base address. +/// \param stride +/// The stride between the rows' data to be loaded in memory. +__DEFAULT_FN_ATTRS_TILE +static __inline__ void __tile_stream_loadd(__tile1024i *dst, const void *base, + __SIZE_TYPE__ stride) { + dst->tile = _tile_loaddt1_internal(dst->row, dst->col, base, stride); +} + +/// Compute dot-product of bytes in tiles with a source/destination accumulator. +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with +/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit +/// results. Sum these 4 results with the corresponding 32-bit integer in "dst", +/// and store the 32-bit result back to tile "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBSSD instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_INT8 +static __inline__ void __tile_dpbssd(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_dpbssd_internal(src0.row, src1.col, src0.col, dst->tile, + src0.tile, src1.tile); +} + +/// Compute dot-product of bytes in tiles with a source/destination accumulator. +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with +/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate +/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in "dst", and store the 32-bit result back to tile "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBSUD instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_INT8 +static __inline__ void __tile_dpbsud(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_dpbsud_internal(src0.row, src1.col, src0.col, dst->tile, + src0.tile, src1.tile); +} + +/// Compute dot-product of bytes in tiles with a source/destination accumulator. +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with +/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit +/// results. Sum these 4 results with the corresponding 32-bit integer in "dst", +/// and store the 32-bit result back to tile "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBUSD instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. 
+/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_INT8 +static __inline__ void __tile_dpbusd(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_dpbusd_internal(src0.row, src1.col, src0.col, dst->tile, + src0.tile, src1.tile); +} + +/// Compute dot-product of bytes in tiles with a source/destination accumulator. +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with +/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate +/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in +/// "dst", and store the 32-bit result back to tile "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBUUD instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_INT8 +static __inline__ void __tile_dpbuud(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_dpbuud_internal(src0.row, src1.col, src0.col, dst->tile, + src0.tile, src1.tile); +} + +/// Store the tile specified by "src" to memory specifieid by "base" address and +/// "stride". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TILESTORED instruction. +/// +/// \param base +/// A pointer to base address. +/// \param stride +/// The stride between the rows' data to be stored in memory. +__DEFAULT_FN_ATTRS_TILE +static __inline__ void __tile_stored(void *base, __SIZE_TYPE__ stride, + __tile1024i src) { + _tile_stored_internal(src.row, src.col, base, stride, src.tile); +} + +/// Zero the tile specified by "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TILEZERO instruction. +/// +/// \param dst +/// The destination tile to be zero. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_TILE +static __inline__ void __tile_zero(__tile1024i *dst) { + dst->tile = __builtin_ia32_tilezero_internal(dst->row, dst->col); +} + +/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and +/// src1, accumulating the intermediate single-precision (32-bit) floating-point +/// elements with elements in "dst", and store the 32-bit result back to tile +/// "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPBF16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. +__DEFAULT_FN_ATTRS_BF16 +static __inline__ void __tile_dpbf16ps(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_dpbf16ps_internal(src0.row, src1.col, src0.col, dst->tile, + src0.tile, src1.tile); +} + +/// Compute dot-product of FP16 (16-bit) floating-point pairs in tiles src0 and +/// src1, accumulating the intermediate single-precision (32-bit) floating-point +/// elements with elements in "dst", and store the 32-bit result back to tile +/// "dst". +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TDPFP16PS instruction. +/// +/// \param dst +/// The destination tile. Max size is 1024 Bytes. +/// \param src0 +/// The 1st source tile. Max size is 1024 Bytes. +/// \param src1 +/// The 2nd source tile. Max size is 1024 Bytes. 
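For comparison with the raw macros, here is a hedged sketch of the shape-carrying __tile1024i interface defined above (the __tile_dpfp16ps definition continues below). The row and column-byte values are illustrative; the point is that the compiler derives the tile configuration from the constant shape fields.

#include <immintrin.h>
#include <stdint.h>

/* Illustrative only: the same int8 dot product written against the
   shape-carrying wrappers. */
static void amx_dot_tile_wrapped(int32_t *C, const int8_t *A,
                                 const int8_t *B) {
  __tile1024i a = {16, 64};   /* rows, colsb (bytes per row) */
  __tile1024i b = {16, 64};
  __tile1024i c = {16, 64};
  __tile_zero(&c);
  __tile_loadd(&a, A, 64);
  __tile_loadd(&b, B, 64);
  __tile_dpbssd(&c, a, b);    /* c.tile += dot4(a, b) */
  __tile_stored(C, 64, c);
}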
+__DEFAULT_FN_ATTRS_FP16 +static __inline__ void __tile_dpfp16ps(__tile1024i *dst, __tile1024i src0, + __tile1024i src1) { + dst->tile = _tile_dpfp16ps_internal(src0.row, src1.col, src0.col, dst->tile, + src0.tile, src1.tile); +} + +#undef __DEFAULT_FN_ATTRS_TILE +#undef __DEFAULT_FN_ATTRS_INT8 +#undef __DEFAULT_FN_ATTRS_BF16 +#undef __DEFAULT_FN_ATTRS_FP16 + +#endif /* __x86_64__ */ +#endif /* __AMXINTRIN_H */ diff --git a/third_party/intel/clang/avx2intrin.h b/third_party/intel/clang/avx2intrin.h new file mode 100644 index 000000000..096cae01b --- /dev/null +++ b/third_party/intel/clang/avx2intrin.h @@ -0,0 +1,5284 @@ +/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX2INTRIN_H +#define __AVX2INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx2,no-evex512"), __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx2,no-evex512"), __min_vector_width__(128))) + +/* SSE4 Multiple Packed Sums of Absolute Difference. */ +/// Computes sixteen sum of absolute difference (SAD) operations on sets of +/// four unsigned 8-bit integers from the 256-bit integer vectors \a X and +/// \a Y. +/// +/// Eight SAD results are computed using the lower half of the input +/// vectors, and another eight using the upper half. These 16-bit values +/// are returned in the lower and upper halves of the 256-bit result, +/// respectively. +/// +/// A single SAD operation selects four bytes from \a X and four bytes from +/// \a Y as input. It computes the differences between each \a X byte and +/// the corresponding \a Y byte, takes the absolute value of each +/// difference, and sums these four values to form one 16-bit result. The +/// intrinsic computes 16 of these results with different sets of input +/// bytes. +/// +/// For each set of eight results, the SAD operations use the same four +/// bytes from \a Y; the starting bit position for these four bytes is +/// specified by \a M[1:0] times 32. The eight operations use successive +/// sets of four bytes from \a X; the starting bit position for the first +/// set of four bytes is specified by \a M[2] times 32. These bit positions +/// are all relative to the 128-bit lane for each set of eight operations. +/// +/// \code{.operation} +/// r := 0 +/// FOR i := 0 TO 1 +/// j := i*3 +/// Ybase := M[j+1:j]*32 + i*128 +/// Xbase := M[j+2]*32 + i*128 +/// FOR k := 0 TO 3 +/// temp0 := ABS(X[Xbase+7:Xbase] - Y[Ybase+7:Ybase]) +/// temp1 := ABS(X[Xbase+15:Xbase+8] - Y[Ybase+15:Ybase+8]) +/// temp2 := ABS(X[Xbase+23:Xbase+16] - Y[Ybase+23:Ybase+16]) +/// temp3 := ABS(X[Xbase+31:Xbase+24] - Y[Ybase+31:Ybase+24]) +/// result[r+15:r] := temp0 + temp1 + temp2 + temp3 +/// Xbase := Xbase + 8 +/// r := r + 16 +/// ENDFOR +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_mpsadbw_epu8(__m256i X, __m256i Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VMPSADBW instruction. 
+/// +/// \param X +/// A 256-bit integer vector containing one of the inputs. +/// \param Y +/// A 256-bit integer vector containing one of the inputs. +/// \param M +/// An unsigned immediate value specifying the starting positions of the +/// bytes to operate on. +/// \returns A 256-bit vector of [16 x i16] containing the result. +#define _mm256_mpsadbw_epu8(X, Y, M) \ + ((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \ + (__v32qi)(__m256i)(Y), (int)(M))) + +/// Computes the absolute value of each signed byte in the 256-bit integer +/// vector \a __a and returns each value in the corresponding byte of +/// the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_abs_epi8(__m256i __a) +{ + return (__m256i)__builtin_elementwise_abs((__v32qs)__a); +} + +/// Computes the absolute value of each signed 16-bit element in the 256-bit +/// vector of [16 x i16] in \a __a and returns each value in the +/// corresponding element of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_abs_epi16(__m256i __a) +{ + return (__m256i)__builtin_elementwise_abs((__v16hi)__a); +} + +/// Computes the absolute value of each signed 32-bit element in the 256-bit +/// vector of [8 x i32] in \a __a and returns each value in the +/// corresponding element of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_abs_epi32(__m256i __a) +{ + return (__m256i)__builtin_elementwise_abs((__v8si)__a); +} + +/// Converts the elements of two 256-bit vectors of [16 x i16] to 8-bit +/// integers using signed saturation, and returns the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*16 +/// k := i*8 +/// result[7+k:k] := SATURATE8(__a[15+j:j]) +/// result[71+k:64+k] := SATURATE8(__b[15+j:j]) +/// result[135+k:128+k] := SATURATE8(__a[143+j:128+j]) +/// result[199+k:192+k] := SATURATE8(__b[143+j:128+j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPACKSSWB instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] used to generate result[63:0] and +/// result[191:128]. +/// \param __b +/// A 256-bit vector of [16 x i16] used to generate result[127:64] and +/// result[255:192]. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_packs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b); +} + +/// Converts the elements of two 256-bit vectors of [8 x i32] to 16-bit +/// integers using signed saturation, and returns the resulting 256-bit +/// vector of [16 x i16]. 
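Before the packed-saturation pseudo-code that follows, a small hedged example of _mm256_packs_epi16 showing both the int8 saturation and the per-128-bit-lane interleaving of the two sources (values chosen only to make the effect visible):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i a = _mm256_set1_epi16(300);    /* saturates to  127 */
  __m256i b = _mm256_set1_epi16(-300);   /* saturates to -128 */
  __m256i p = _mm256_packs_epi16(a, b);
  signed char out[32];
  _mm256_storeu_si256((__m256i *)out, p);
  /* Bytes 0-7 come from a's low lane, 8-15 from b's low lane,
     16-23 from a's high lane, 24-31 from b's high lane. */
  printf("%d %d %d %d\n", out[0], out[8], out[16], out[24]);  /* 127 -128 127 -128 */
  return 0;
}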
+/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*32 +/// k := i*16 +/// result[15+k:k] := SATURATE16(__a[31+j:j]) +/// result[79+k:64+k] := SATURATE16(__b[31+j:j]) +/// result[143+k:128+k] := SATURATE16(__a[159+j:128+j]) +/// result[207+k:192+k] := SATURATE16(__b[159+j:128+j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPACKSSDW instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] used to generate result[63:0] and +/// result[191:128]. +/// \param __b +/// A 256-bit vector of [8 x i32] used to generate result[127:64] and +/// result[255:192]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_packs_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b); +} + +/// Converts elements from two 256-bit vectors of [16 x i16] to 8-bit integers +/// using unsigned saturation, and returns the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*16 +/// k := i*8 +/// result[7+k:k] := SATURATE8U(__a[15+j:j]) +/// result[71+k:64+k] := SATURATE8U(__b[15+j:j]) +/// result[135+k:128+k] := SATURATE8U(__a[143+j:128+j]) +/// result[199+k:192+k] := SATURATE8U(__b[143+j:128+j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPACKUSWB instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] used to generate result[63:0] and +/// result[191:128]. +/// \param __b +/// A 256-bit vector of [16 x i16] used to generate result[127:64] and +/// result[255:192]. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_packus_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b); +} + +/// Converts elements from two 256-bit vectors of [8 x i32] to 16-bit integers +/// using unsigned saturation, and returns the resulting 256-bit vector of +/// [16 x i16]. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*32 +/// k := i*16 +/// result[15+k:k] := SATURATE16U(__V1[31+j:j]) +/// result[79+k:64+k] := SATURATE16U(__V2[31+j:j]) +/// result[143+k:128+k] := SATURATE16U(__V1[159+j:128+j]) +/// result[207+k:192+k] := SATURATE16U(__V2[159+j:128+j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPACKUSDW instruction. +/// +/// \param __V1 +/// A 256-bit vector of [8 x i32] used to generate result[63:0] and +/// result[191:128]. +/// \param __V2 +/// A 256-bit vector of [8 x i32] used to generate result[127:64] and +/// result[255:192]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_packus_epi32(__m256i __V1, __m256i __V2) +{ + return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2); +} + +/// Adds 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors and returns the lower 8 bits of each sum in the corresponding +/// byte of the 256-bit integer vector result (overflow is ignored). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the source operands. +/// \param __b +/// A 256-bit integer vector containing one of the source operands. +/// \returns A 256-bit integer vector containing the sums. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)((__v32qu)__a + (__v32qu)__b); +} + +/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of +/// [16 x i16] and returns the lower 16 bits of each sum in the +/// corresponding element of the [16 x i16] result (overflow is ignored). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hu)__a + (__v16hu)__b); +} + +/// Adds 32-bit integers from corresponding elements of two 256-bit vectors of +/// [8 x i32] and returns the lower 32 bits of each sum in the corresponding +/// element of the [8 x i32] result (overflow is ignored). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [8 x i32] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a + (__v8su)__b); +} + +/// Adds 64-bit integers from corresponding elements of two 256-bit vectors of +/// [4 x i64] and returns the lower 64 bits of each sum in the corresponding +/// element of the [4 x i64] result (overflow is ignored). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x i64] containing one of the source operands. +/// \returns A 256-bit vector of [4 x i64] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_add_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a + (__v4du)__b); +} + +/// Adds 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors using signed saturation, and returns each sum in the +/// corresponding byte of the 256-bit integer vector result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDSB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the source operands. +/// \param __b +/// A 256-bit integer vector containing one of the source operands. +/// \returns A 256-bit integer vector containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_adds_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b); +} + +/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of +/// [16 x i16] using signed saturation, and returns the [16 x i16] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. 
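The contrast between the modular adds above and the saturating adds (whose definitions follow) is easiest to see with byte values near the type limits; a hedged example:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i x = _mm256_set1_epi8(100);
  __m256i y = _mm256_set1_epi8(50);
  __m256i wrap = _mm256_add_epi8(x, y);   /* 150 wraps to -106 */
  __m256i sat  = _mm256_adds_epi8(x, y);  /* clamps to 127     */
  signed char w[32], s[32];
  _mm256_storeu_si256((__m256i *)w, wrap);
  _mm256_storeu_si256((__m256i *)s, sat);
  printf("wrap=%d sat=%d\n", w[0], s[0]); /* wrap=-106 sat=127 */
  return 0;
}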
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_adds_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b); +} + +/// Adds 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors using unsigned saturation, and returns each sum in the +/// corresponding byte of the 256-bit integer vector result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDUSB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the source operands. +/// \param __b +/// A 256-bit integer vector containing one of the source operands. +/// \returns A 256-bit integer vector containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_adds_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b); +} + +/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of +/// [16 x i16] using unsigned saturation, and returns the [16 x i16] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPADDUSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_adds_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b); +} + +/// Uses the lower half of the 256-bit vector \a a as the upper half of a +/// temporary 256-bit value, and the lower half of the 256-bit vector \a b +/// as the lower half of the temporary value. Right-shifts the temporary +/// value by \a n bytes, and uses the lower 16 bytes of the shifted value +/// as the lower 16 bytes of the result. Uses the upper halves of \a a and +/// \a b to make another temporary value, right shifts by \a n, and uses +/// the lower 16 bytes of the shifted value as the upper 16 bytes of the +/// result. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_alignr_epi8(__m256i a, __m256i b, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPALIGNR instruction. +/// +/// \param a +/// A 256-bit integer vector containing source values. +/// \param b +/// A 256-bit integer vector containing source values. +/// \param n +/// An immediate value specifying the number of bytes to shift. +/// \returns A 256-bit integer vector containing the result. +#define _mm256_alignr_epi8(a, b, n) \ + ((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), (n))) + +/// Computes the bitwise AND of the 256-bit integer vectors in \a __a and +/// \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPAND instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_and_si256(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a & (__v4du)__b); +} + +/// Computes the bitwise AND of the 256-bit integer vector in \a __b with +/// the bitwise NOT of the 256-bit integer vector in \a __a. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPANDN instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. 
+/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_andnot_si256(__m256i __a, __m256i __b) +{ + return (__m256i)(~(__v4du)__a & (__v4du)__b); +} + +/// Computes the averages of the corresponding unsigned bytes in the two +/// 256-bit integer vectors in \a __a and \a __b and returns each +/// average in the corresponding byte of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := (__a[j+7:j] + __b[j+7:j] + 1) >> 1 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPAVGB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_avg_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b); +} + +/// Computes the averages of the corresponding unsigned 16-bit integers in +/// the two 256-bit vectors of [16 x i16] in \a __a and \a __b and returns +/// each average in the corresponding element of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := (__a[j+15:j] + __b[j+15:j] + 1) >> 1 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPAVGW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_avg_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b); +} + +/// Merges 8-bit integer values from either of the two 256-bit vectors +/// \a __V1 or \a __V2, as specified by the 256-bit mask \a __M and returns +/// the resulting 256-bit integer vector. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// IF __M[7+i] == 0 +/// result[7+j:j] := __V1[7+j:j] +/// ELSE +/// result[7+j:j] := __V2[7+j:j] +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBLENDVB instruction. +/// +/// \param __V1 +/// A 256-bit integer vector containing source values. +/// \param __V2 +/// A 256-bit integer vector containing source values. +/// \param __M +/// A 256-bit integer vector, with bit [7] of each byte specifying the +/// source for each corresponding byte of the result. When the mask bit +/// is 0, the byte is copied from \a __V1; otherwise, it is copied from +/// \a __V2. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M) +{ + return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2, + (__v32qi)__M); +} + +/// Merges 16-bit integer values from either of the two 256-bit vectors +/// \a V1 or \a V2, as specified by the immediate integer operand \a M, +/// and returns the resulting 256-bit vector of [16 x i16]. 
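A hedged example of the byte-granular blend described above, using the top bit of each mask byte to pick between the two sources (the immediate-mask _mm256_blend_epi16 variant continues below):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i v1 = _mm256_set1_epi8(1);
  __m256i v2 = _mm256_set1_epi8(2);
  /* Only bit 7 of each mask byte matters: here every odd-indexed byte has
     it set, so odd bytes come from v2 and even bytes from v1. */
  __m256i mask = _mm256_set1_epi16((short)0x8000);
  __m256i r = _mm256_blendv_epi8(v1, v2, mask);
  unsigned char out[32];
  _mm256_storeu_si256((__m256i *)out, r);
  printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);  /* 1 2 1 2 */
  return 0;
}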
+/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*16 +/// IF M[i] == 0 +/// result[7+j:j] := V1[7+j:j] +/// result[135+j:128+j] := V1[135+j:128+j] +/// ELSE +/// result[7+j:j] := V2[7+j:j] +/// result[135+j:128+j] := V2[135+j:128+j] +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_blend_epi16(__m256i V1, __m256i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPBLENDW instruction. +/// +/// \param V1 +/// A 256-bit vector of [16 x i16] containing source values. +/// \param V2 +/// A 256-bit vector of [16 x i16] containing source values. +/// \param M +/// An immediate 8-bit integer operand, with bits [7:0] specifying the +/// source for each element of the result. The position of the mask bit +/// corresponds to the index of a copied value. When a mask bit is 0, the +/// element is copied from \a V1; otherwise, it is copied from \a V2. +/// \a M[0] determines the source for elements 0 and 8, \a M[1] for +/// elements 1 and 9, and so forth. +/// \returns A 256-bit vector of [16 x i16] containing the result. +#define _mm256_blend_epi16(V1, V2, M) \ + ((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \ + (__v16hi)(__m256i)(V2), (int)(M))) + +/// Compares corresponding bytes in the 256-bit integer vectors in \a __a and +/// \a __b for equality and returns the outcomes in the corresponding +/// bytes of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := (__a[j+7:j] == __b[j+7:j]) ? 0xFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPEQB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the inputs. +/// \param __b +/// A 256-bit integer vector containing one of the inputs. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpeq_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)((__v32qi)__a == (__v32qi)__b); +} + +/// Compares corresponding elements in the 256-bit vectors of [16 x i16] in +/// \a __a and \a __b for equality and returns the outcomes in the +/// corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := (__a[j+15:j] == __b[j+15:j]) ? 0xFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPEQW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the inputs. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpeq_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hi)__a == (__v16hi)__b); +} + +/// Compares corresponding elements in the 256-bit vectors of [8 x i32] in +/// \a __a and \a __b for equality and returns the outcomes in the +/// corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// result[j+31:j] := (__a[j+31:j] == __b[j+31:j]) ? 0xFFFFFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPEQD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the inputs. 
+/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpeq_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8si)__a == (__v8si)__b); +} + +/// Compares corresponding elements in the 256-bit vectors of [4 x i64] in +/// \a __a and \a __b for equality and returns the outcomes in the +/// corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// result[j+63:j] := (__a[j+63:j] == __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPEQQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [4 x i64] containing one of the inputs. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpeq_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4di)__a == (__v4di)__b); +} + +/// Compares corresponding signed bytes in the 256-bit integer vectors in +/// \a __a and \a __b for greater-than and returns the outcomes in the +/// corresponding bytes of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := (__a[j+7:j] > __b[j+7:j]) ? 0xFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPGTB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing one of the inputs. +/// \param __b +/// A 256-bit integer vector containing one of the inputs. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi8(__m256i __a, __m256i __b) +{ + /* This function always performs a signed comparison, but __v32qi is a char + which may be signed or unsigned, so use __v32qs. */ + return (__m256i)((__v32qs)__a > (__v32qs)__b); +} + +/// Compares corresponding signed elements in the 256-bit vectors of +/// [16 x i16] in \a __a and \a __b for greater-than and returns the +/// outcomes in the corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := (__a[j+15:j] > __b[j+15:j]) ? 0xFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPGTW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the inputs. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hi)__a > (__v16hi)__b); +} + +/// Compares corresponding signed elements in the 256-bit vectors of +/// [8 x i32] in \a __a and \a __b for greater-than and returns the +/// outcomes in the corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// result[j+31:j] := (__a[j+31:j] > __b[j+31:j]) ? 0xFFFFFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPGTD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the inputs. +/// \returns A 256-bit vector of [8 x i32] containing the result. 
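Because these comparisons yield all-ones or all-zeros lanes rather than condition flags, they compose directly with the blend and logical operations; a hedged sketch of a per-lane maximum built that way (the _mm256_cmpgt_epi32 definition follows):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i a = _mm256_setr_epi32(1, 5, 3, 9, 2, 8, 4, 7);
  __m256i b = _mm256_set1_epi32(4);
  __m256i gt = _mm256_cmpgt_epi32(a, b);      /* all-ones where a > b */
  __m256i mx = _mm256_blendv_epi8(b, a, gt);  /* per-lane max(a, 4)   */
  int out[8];
  _mm256_storeu_si256((__m256i *)out, mx);
  for (int i = 0; i < 8; ++i) printf("%d ", out[i]);  /* 4 5 4 9 4 8 4 7 */
  printf("\n");
  return 0;
}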
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8si)__a > (__v8si)__b); +} + +/// Compares corresponding signed elements in the 256-bit vectors of +/// [4 x i64] in \a __a and \a __b for greater-than and returns the +/// outcomes in the corresponding elements of the 256-bit result. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// result[j+63:j] := (__a[j+63:j] > __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPCMPGTQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] containing one of the inputs. +/// \param __b +/// A 256-bit vector of [4 x i64] containing one of the inputs. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmpgt_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4di)__a > (__v4di)__b); +} + +/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit +/// vectors of [16 x i16] and returns the lower 16 bits of each sum in an +/// element of the [16 x i16] result (overflow is ignored). Sums from +/// \a __a are returned in the lower 64 bits of each 128-bit half of the +/// result; sums from \a __b are returned in the upper 64 bits of each +/// 128-bit half of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+15:j] := __a[j+15:j] + __a[j+31:j+16] +/// result[j+31:j+16] := __a[j+47:j+32] + __a[j+63:j+48] +/// result[j+47:j+32] := __a[j+79:j+64] + __a[j+95:j+80] +/// result[j+63:j+48] := __a[j+111:j+96] + __a[j+127:j+112] +/// result[j+79:j+64] := __b[j+15:j] + __b[j+31:j+16] +/// result[j+95:j+80] := __b[j+47:j+32] + __b[j+63:j+48] +/// result[j+111:j+96] := __b[j+79:j+64] + __b[j+95:j+80] +/// result[j+127:j+112] := __b[j+111:j+96] + __b[j+127:j+112] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hadd_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b); +} + +/// Horizontally adds the adjacent pairs of 32-bit integers from two 256-bit +/// vectors of [8 x i32] and returns the lower 32 bits of each sum in an +/// element of the [8 x i32] result (overflow is ignored). Sums from \a __a +/// are returned in the lower 64 bits of each 128-bit half of the result; +/// sums from \a __b are returned in the upper 64 bits of each 128-bit half +/// of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+31:j] := __a[j+31:j] + __a[j+63:j+32] +/// result[j+63:j+32] := __a[j+95:j+64] + __a[j+127:j+96] +/// result[j+95:j+64] := __b[j+31:j] + __b[j+63:j+32] +/// result[j+127:j+96] := __b[j+95:j+64] + __b[j+127:j+96] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [8 x i32] containing the sums. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hadd_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b); +} + +/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit +/// vectors of [16 x i16] using signed saturation and returns each sum in +/// an element of the [16 x i16] result. Sums from \a __a are returned in +/// the lower 64 bits of each 128-bit half of the result; sums from \a __b +/// are returned in the upper 64 bits of each 128-bit half of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+15:j] := SATURATE16(__a[j+15:j] + __a[j+31:j+16]) +/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] + __a[j+63:j+48]) +/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] + __a[j+95:j+80]) +/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] + __a[j+127:j+112]) +/// result[j+79:j+64] := SATURATE16(__b[j+15:j] + __b[j+31:j+16]) +/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] + __b[j+63:j+48]) +/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] + __b[j+95:j+80]) +/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] + __b[j+127:j+112]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the sums. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hadds_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b); +} + +/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit +/// vectors of [16 x i16] and returns the lower 16 bits of each difference +/// in an element of the [16 x i16] result (overflow is ignored). +/// Differences from \a __a are returned in the lower 64 bits of each +/// 128-bit half of the result; differences from \a __b are returned in the +/// upper 64 bits of each 128-bit half of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+15:j] := __a[j+15:j] - __a[j+31:j+16] +/// result[j+31:j+16] := __a[j+47:j+32] - __a[j+63:j+48] +/// result[j+47:j+32] := __a[j+79:j+64] - __a[j+95:j+80] +/// result[j+63:j+48] := __a[j+111:j+96] - __a[j+127:j+112] +/// result[j+79:j+64] := __b[j+15:j] - __b[j+31:j+16] +/// result[j+95:j+80] := __b[j+47:j+32] - __b[j+63:j+48] +/// result[j+111:j+96] := __b[j+79:j+64] - __b[j+95:j+80] +/// result[j+127:j+112] := __b[j+111:j+96] - __b[j+127:j+112] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hsub_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b); +} + +/// Horizontally subtracts adjacent pairs of 32-bit integers from two 256-bit +/// vectors of [8 x i32] and returns the lower 32 bits of each difference in +/// an element of the [8 x i32] result (overflow is ignored). 
Differences +/// from \a __a are returned in the lower 64 bits of each 128-bit half of +/// the result; differences from \a __b are returned in the upper 64 bits +/// of each 128-bit half of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+31:j] := __a[j+31:j] - __a[j+63:j+32] +/// result[j+63:j+32] := __a[j+95:j+64] - __a[j+127:j+96] +/// result[j+95:j+64] := __b[j+31:j] - __b[j+63:j+32] +/// result[j+127:j+96] := __b[j+95:j+64] - __b[j+127:j+96] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [8 x i32] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hsub_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b); +} + +/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit +/// vectors of [16 x i16] using signed saturation and returns each sum in +/// an element of the [16 x i16] result. Differences from \a __a are +/// returned in the lower 64 bits of each 128-bit half of the result; +/// differences from \a __b are returned in the upper 64 bits of each +/// 128-bit half of the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// result[j+15:j] := SATURATE16(__a[j+15:j] - __a[j+31:j+16]) +/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] - __a[j+63:j+48]) +/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] - __a[j+95:j+80]) +/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] - __a[j+127:j+112]) +/// result[j+79:j+64] := SATURATE16(__b[j+15:j] - __b[j+31:j+16]) +/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] - __b[j+63:j+48]) +/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] - __b[j+95:j+80]) +/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] - __b[j+127:j+112]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_hsubs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b); +} + +/// Multiplies each unsigned byte from the 256-bit integer vector in \a __a +/// with the corresponding signed byte from the 256-bit integer vector in +/// \a __b, forming signed 16-bit intermediate products. Adds adjacent +/// pairs of those products using signed saturation to form 16-bit sums +/// returned as elements of the [16 x i16] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// temp1 := __a[j+7:j] * __b[j+7:j] +/// temp2 := __a[j+15:j+8] * __b[j+15:j+8] +/// result[j+15:j] := SATURATE16(temp1 + temp2) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMADDUBSW instruction. +/// +/// \param __a +/// A 256-bit vector containing one of the source operands. +/// \param __b +/// A 256-bit vector containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the result. 
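A common use of the multiply-add pair documented above and below is an unsigned-by-signed byte dot product that widens to 32-bit lanes; a hedged example (values chosen so no saturation occurs):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i u = _mm256_set1_epi8(10);             /* treated as unsigned */
  __m256i s = _mm256_set1_epi8(-3);             /* treated as signed   */
  __m256i pairs = _mm256_maddubs_epi16(u, s);   /* 10*-3 + 10*-3 = -60 */
  __m256i quads = _mm256_madd_epi16(pairs, _mm256_set1_epi16(1));
  int out[8];
  _mm256_storeu_si256((__m256i *)out, quads);
  printf("%d\n", out[0]);                       /* -120: four products */
  return 0;
}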
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maddubs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b); +} + +/// Multiplies corresponding 16-bit elements of two 256-bit vectors of +/// [16 x i16], forming 32-bit intermediate products, and adds pairs of +/// those products to form 32-bit sums returned as elements of the +/// [8 x i32] result. +/// +/// There is only one wraparound case: when all four of the 16-bit sources +/// are \c 0x8000, the result will be \c 0x80000000. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// temp1 := __a[j+15:j] * __b[j+15:j] +/// temp2 := __a[j+31:j+16] * __b[j+31:j+16] +/// result[j+31:j] := temp1 + temp2 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMADDWD instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b); +} + +/// Compares the corresponding signed bytes in the two 256-bit integer vectors +/// in \a __a and \a __b and returns the larger of each pair in the +/// corresponding byte of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXSB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b); +} + +/// Compares the corresponding signed 16-bit integers in the two 256-bit +/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b); +} + +/// Compares the corresponding signed 32-bit integers in the two 256-bit +/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXSD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b); +} + +/// Compares the corresponding unsigned bytes in the two 256-bit integer +/// vectors in \a __a and \a __b and returns the larger of each pair in +/// the corresponding byte of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXUB instruction. +/// +/// \param __a +/// A 256-bit integer vector. 
+/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b); +} + +/// Compares the corresponding unsigned 16-bit integers in the two 256-bit +/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXUW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b); +} + +/// Compares the corresponding unsigned 32-bit integers in the two 256-bit +/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMAXUD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epu32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b); +} + +/// Compares the corresponding signed bytes in the two 256-bit integer vectors +/// in \a __a and \a __b and returns the smaller of each pair in the +/// corresponding byte of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINSB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b); +} + +/// Compares the corresponding signed 16-bit integers in the two 256-bit +/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b); +} + +/// Compares the corresponding signed 32-bit integers in the two 256-bit +/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINSD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. 
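The element-wise minimum and maximum operations above combine naturally into a clamp; a hedged helper sketch:

#include <immintrin.h>

/* Clamp every signed 32-bit lane of v into [lo, hi]. */
static inline __m256i clamp_epi32(__m256i v, int lo, int hi) {
  return _mm256_min_epi32(_mm256_max_epi32(v, _mm256_set1_epi32(lo)),
                          _mm256_set1_epi32(hi));
}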
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b); +} + +/// Compares the corresponding unsigned bytes in the two 256-bit integer +/// vectors in \a __a and \a __b and returns the smaller of each pair in +/// the corresponding byte of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINUB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b); +} + +/// Compares the corresponding unsigned 16-bit integers in the two 256-bit +/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINUW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b); +} + +/// Compares the corresponding unsigned 32-bit integers in the two 256-bit +/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of +/// each pair in the corresponding element of the 256-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMINUD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epu32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b); +} + +/// Creates a 32-bit integer mask from the most significant bit of each byte +/// in the 256-bit integer vector in \a __a and returns the result. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[i] := __a[j+7] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVMSKB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing the source bytes. +/// \returns The 32-bit integer mask. +static __inline__ int __DEFAULT_FN_ATTRS256 +_mm256_movemask_epi8(__m256i __a) +{ + return __builtin_ia32_pmovmskb256((__v32qi)__a); +} + +/// Sign-extends bytes from the 128-bit integer vector in \a __V and returns +/// the 16-bit values in the corresponding elements of a 256-bit vector +/// of [16 x i16]. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*8 +/// k := i*16 +/// result[k+15:k] := SignExtend(__V[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXBW instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [16 x i16] containing the sign-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi8_epi16(__m128i __V) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. 
*/ + return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi); +} + +/// Sign-extends bytes from the lower half of the 128-bit integer vector in +/// \a __V and returns the 32-bit values in the corresponding elements of a +/// 256-bit vector of [8 x i32]. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*8 +/// k := i*32 +/// result[k+31:k] := SignExtend(__V[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXBD instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [8 x i32] containing the sign-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi8_epi32(__m128i __V) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si); +} + +/// Sign-extends the first four bytes from the 128-bit integer vector in +/// \a __V and returns the 64-bit values in the corresponding elements of a +/// 256-bit vector of [4 x i64]. +/// +/// \code{.operation} +/// result[63:0] := SignExtend(__V[7:0]) +/// result[127:64] := SignExtend(__V[15:8]) +/// result[191:128] := SignExtend(__V[23:16]) +/// result[255:192] := SignExtend(__V[31:24]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXBQ instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [4 x i64] containing the sign-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi8_epi64(__m128i __V) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di); +} + +/// Sign-extends 16-bit elements from the 128-bit vector of [8 x i16] in +/// \a __V and returns the 32-bit values in the corresponding elements of a +/// 256-bit vector of [8 x i32]. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*16 +/// k := i*32 +/// result[k+31:k] := SignExtend(__V[j+15:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXWD instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16] containing the source values. +/// \returns A 256-bit vector of [8 x i32] containing the sign-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi16_epi32(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si); +} + +/// Sign-extends 16-bit elements from the lower half of the 128-bit vector of +/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding +/// elements of a 256-bit vector of [4 x i64]. +/// +/// \code{.operation} +/// result[63:0] := SignExtend(__V[15:0]) +/// result[127:64] := SignExtend(__V[31:16]) +/// result[191:128] := SignExtend(__V[47:32]) +/// result[255:192] := SignExtend(__V[64:48]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXWQ instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16] containing the source values. +/// \returns A 256-bit vector of [4 x i64] containing the sign-extended +/// values. 
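The VPMOVSX conversions above are commonly used to widen narrow signed data before arithmetic that would overflow in 8 or 16 bits. A minimal sketch, where accumulate_i8 is an illustrative helper name rather than a header API:

#include <immintrin.h>

/* Sign-extend the low 8 bytes of a 128-bit register to 32-bit lanes and add
   them into an 8 x i32 accumulator. */
static inline __m256i accumulate_i8(__m256i acc, __m128i bytes)
{
    __m256i wide = _mm256_cvtepi8_epi32(bytes);
    return _mm256_add_epi32(acc, wide);
}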
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi16_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di); +} + +/// Sign-extends 32-bit elements from the 128-bit vector of [4 x i32] in +/// \a __V and returns the 64-bit values in the corresponding elements of a +/// 256-bit vector of [4 x i64]. +/// +/// \code{.operation} +/// result[63:0] := SignExtend(__V[31:0]) +/// result[127:64] := SignExtend(__V[63:32]) +/// result[191:128] := SignExtend(__V[95:64]) +/// result[255:192] := SignExtend(__V[127:96]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVSXDQ instruction. +/// +/// \param __V +/// A 128-bit vector of [4 x i32] containing the source values. +/// \returns A 256-bit vector of [4 x i64] containing the sign-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi32_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v4si)__V, __v4di); +} + +/// Zero-extends bytes from the 128-bit integer vector in \a __V and returns +/// the 16-bit values in the corresponding elements of a 256-bit vector +/// of [16 x i16]. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*8 +/// k := i*16 +/// result[k+15:k] := ZeroExtend(__V[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVZXBW instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [16 x i16] containing the zero-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu8_epi16(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi); +} + +/// Zero-extends bytes from the lower half of the 128-bit integer vector in +/// \a __V and returns the 32-bit values in the corresponding elements of a +/// 256-bit vector of [8 x i32]. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*8 +/// k := i*32 +/// result[k+31:k] := ZeroExtend(__V[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVZXBD instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [8 x i32] containing the zero-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu8_epi32(__m128i __V) +{ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si); +} + +/// Zero-extends the first four bytes from the 128-bit integer vector in +/// \a __V and returns the 64-bit values in the corresponding elements of a +/// 256-bit vector of [4 x i64]. +/// +/// \code{.operation} +/// result[63:0] := ZeroExtend(__V[7:0]) +/// result[127:64] := ZeroExtend(__V[15:8]) +/// result[191:128] := ZeroExtend(__V[23:16]) +/// result[255:192] := ZeroExtend(__V[31:24]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVZXBQ instruction. +/// +/// \param __V +/// A 128-bit integer vector containing the source bytes. +/// \returns A 256-bit vector of [4 x i64] containing the zero-extended +/// values. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu8_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di); +} + +/// Zero-extends 16-bit elements from the 128-bit vector of [8 x i16] in +/// \a __V and returns the 32-bit values in the corresponding elements of a +/// 256-bit vector of [8 x i32]. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*16 +/// k := i*32 +/// result[k+31:k] := ZeroExtend(__V[j+15:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVZXWD instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16] containing the source values. +/// \returns A 256-bit vector of [8 x i32] containing the zero-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu16_epi32(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si); +} + +/// Zero-extends 16-bit elements from the lower half of the 128-bit vector of +/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding +/// elements of a 256-bit vector of [4 x i64]. +/// +/// \code{.operation} +/// result[63:0] := ZeroExtend(__V[15:0]) +/// result[127:64] := ZeroExtend(__V[31:16]) +/// result[191:128] := ZeroExtend(__V[47:32]) +/// result[255:192] := ZeroExtend(__V[63:48]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVZXWQ instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16] containing the source values. +/// \returns A 256-bit vector of [4 x i64] containing the zero-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu16_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di); +} + +/// Zero-extends 32-bit elements from the 128-bit vector of [4 x i32] in +/// \a __V and returns the 64-bit values in the corresponding elements of a +/// 256-bit vector of [4 x i64]. +/// +/// \code{.operation} +/// result[63:0] := ZeroExtend(__V[31:0]) +/// result[127:64] := ZeroExtend(__V[63:32]) +/// result[191:128] := ZeroExtend(__V[95:64]) +/// result[255:192] := ZeroExtend(__V[127:96]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMOVZXDQ instruction. +/// +/// \param __V +/// A 128-bit vector of [4 x i32] containing the source values. +/// \returns A 256-bit vector of [4 x i64] containing the zero-extended +/// values. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtepu32_epi64(__m128i __V) +{ + return (__m256i)__builtin_convertvector((__v4su)__V, __v4di); +} + +/// Multiplies signed 32-bit integers from even-numbered elements of two +/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the +/// [4 x i64] result. +/// +/// \code{.operation} +/// result[63:0] := __a[31:0] * __b[31:0] +/// result[127:64] := __a[95:64] * __b[95:64] +/// result[191:128] := __a[159:128] * __b[159:128] +/// result[255:192] := __a[223:192] * __b[223:192] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [4 x i64] containing the products.
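Because VPMULDQ only reads the even-numbered 32-bit lanes, producing all eight 64-bit products takes a second multiply on the odd lanes. A sketch of the usual idiom; widening_mul is an illustrative name, not a header API:

#include <immintrin.h>

/* Compute the four even-lane and four odd-lane 64-bit products of the signed
   32-bit lanes of a and b. The odd lanes are shifted down so that the
   multiply, which reads only the low 32 bits of each 64-bit lane, sees them. */
static inline void widening_mul(__m256i a, __m256i b, __m256i *even, __m256i *odd)
{
    *even = _mm256_mul_epi32(a, b);
    *odd  = _mm256_mul_epi32(_mm256_srli_epi64(a, 32),
                             _mm256_srli_epi64(b, 32));
}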
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mul_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b); +} + +/// Multiplies signed 16-bit integer elements of two 256-bit vectors of +/// [16 x i16], truncates the 32-bit results to the most significant 18 +/// bits, rounds by adding 1, and returns bits [16:1] of each rounded +/// product in the [16 x i16] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// temp := ((__a[j+15:j] * __b[j+15:j]) >> 14) + 1 +/// result[j+15:j] := temp[16:1] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULHRSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the rounded products. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mulhrs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b); +} + +/// Multiplies unsigned 16-bit integer elements of two 256-bit vectors of +/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the +/// [16 x i16] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULHUW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the products. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mulhi_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b); +} + +/// Multiplies signed 16-bit integer elements of two 256-bit vectors of +/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the +/// [16 x i16] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULHW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the products. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mulhi_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b); +} + +/// Multiplies signed 16-bit integer elements of two 256-bit vectors of +/// [16 x i16], and returns the lower 16 bits of each 32-bit product in the +/// [16 x i16] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULLW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [16 x i16] containing one of the source operands. +/// \returns A 256-bit vector of [16 x i16] containing the products. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mullo_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hu)__a * (__v16hu)__b); +} + +/// Multiplies signed 32-bit integer elements of two 256-bit vectors of +/// [8 x i32], and returns the lower 32 bits of each 64-bit product in the +/// [8 x i32] result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULLD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. 
+/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [8 x i32] containing the products. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mullo_epi32 (__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a * (__v8su)__b); +} + +/// Multiplies unsigned 32-bit integers from even-numbered elements of two +/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the +/// [4 x i64] result. +/// +/// \code{.operation} +/// result[63:0] := __a[31:0] * __b[31:0] +/// result[127:64] := __a[95:64] * __b[95:64] +/// result[191:128] := __a[159:128] * __b[159:128] +/// result[255:192] := __a[223:192] * __b[223:192] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULUDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x i32] containing one of the source operands. +/// \returns A 256-bit vector of [4 x i64] containing the products. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mul_epu32(__m256i __a, __m256i __b) +{ + return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b); +} + +/// Computes the bitwise OR of the 256-bit integer vectors in \a __a and +/// \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPOR instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_or_si256(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a | (__v4du)__b); +} + +/// Computes four sum of absolute difference (SAD) operations on sets of eight +/// unsigned 8-bit integers from the 256-bit integer vectors \a __a and +/// \a __b. +/// +/// One SAD result is computed for each set of eight bytes from \a __a and +/// eight bytes from \a __b. The zero-extended SAD value is returned in the +/// corresponding 64-bit element of the result. +/// +/// A single SAD operation takes the differences between the corresponding +/// bytes of \a __a and \a __b, takes the absolute value of each difference, +/// and sums these eight values to form one 16-bit result. This operation +/// is repeated four times with successive sets of eight bytes. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// temp0 := ABS(__a[j+7:j] - __b[j+7:j]) +/// temp1 := ABS(__a[j+15:j+8] - __b[j+15:j+8]) +/// temp2 := ABS(__a[j+23:j+16] - __b[j+23:j+16]) +/// temp3 := ABS(__a[j+31:j+24] - __b[j+31:j+24]) +/// temp4 := ABS(__a[j+39:j+32] - __b[j+39:j+32]) +/// temp5 := ABS(__a[j+47:j+40] - __b[j+47:j+40]) +/// temp6 := ABS(__a[j+55:j+48] - __b[j+55:j+48]) +/// temp7 := ABS(__a[j+63:j+56] - __b[j+63:j+56]) +/// result[j+15:j] := temp0 + temp1 + temp2 + temp3 + +/// temp4 + temp5 + temp6 + temp7 +/// result[j+63:j+16] := 0 +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSADBW instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result.
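A common use of VPSADBW besides motion estimation is a fast horizontal byte sum: a SAD against zero produces four partial sums that are cheap to reduce. A minimal sketch; sum_bytes is an illustrative helper name:

#include <immintrin.h>

/* Sum all 32 unsigned bytes of v into a scalar. */
static inline unsigned sum_bytes(__m256i v)
{
    __m256i sad = _mm256_sad_epu8(v, _mm256_setzero_si256());
    __m128i lo  = _mm256_castsi256_si128(sad);
    __m128i hi  = _mm256_extracti128_si256(sad, 1);
    __m128i s   = _mm_add_epi64(lo, hi);
    return (unsigned)(_mm_cvtsi128_si64(s) + _mm_extract_epi64(s, 1));
}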
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sad_epu8(__m256i __a, __m256i __b) +{ + return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b); +} + +/// Shuffles 8-bit integers in the 256-bit integer vector \a __a according +/// to control information in the 256-bit integer vector \a __b, and +/// returns the 256-bit result. In effect there are two separate 128-bit +/// shuffles in the lower and upper halves. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// IF __b[j+7] == 1 +/// result[j+7:j] := 0 +/// ELSE +/// k := __b[j+3:j] * 8 +/// IF i > 15 +/// k := k + 128 +/// FI +/// result[j+7:j] := __a[k+7:k] +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSHUFB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing source values. +/// \param __b +/// A 256-bit integer vector containing control information to determine +/// what goes into the corresponding byte of the result. If bit 7 of the +/// control byte is 1, the result byte is 0; otherwise, bits 3:0 of the +/// control byte specify the index (within the same 128-bit half) of \a __a +/// to copy to the result byte. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shuffle_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b); +} + +/// Shuffles 32-bit integers from the 256-bit vector of [8 x i32] in \a a +/// according to control information in the integer literal \a imm, and +/// returns the 256-bit result. In effect there are two parallel 128-bit +/// shuffles in the lower and upper halves. +/// +/// \code{.operation} +/// FOR i := 0 to 3 +/// j := i*32 +/// k := (imm >> i*2)[1:0] * 32 +/// result[j+31:j] := a[k+31:k] +/// result[128+j+31:128+j] := a[128+k+31:128+k] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_shuffle_epi32(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSHUFD instruction. +/// +/// \param a +/// A 256-bit vector of [8 x i32] containing source values. +/// \param imm +/// An immediate 8-bit value specifying which elements to copy from \a a. +/// \a imm[1:0] specifies the index in \a a for elements 0 and 4 of the +/// result, \a imm[3:2] specifies the index for elements 1 and 5, and so +/// forth. +/// \returns A 256-bit vector of [8 x i32] containing the result. +#define _mm256_shuffle_epi32(a, imm) \ + ((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm))) + +/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] in \a a +/// according to control information in the integer literal \a imm, and +/// returns the 256-bit result. The upper 64 bits of each 128-bit half +/// are shuffled in parallel; the lower 64 bits of each 128-bit half are +/// copied from \a a unchanged. +/// +/// \code{.operation} +/// result[63:0] := a[63:0] +/// result[191:128] := a[191:128] +/// FOR i := 0 TO 3 +/// j := i * 16 + 64 +/// k := (imm >> i*2)[1:0] * 16 + 64 +/// result[j+15:j] := a[k+15:k] +/// result[128+j+15:128+j] := a[128+k+15:128+k] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_shufflehi_epi16(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSHUFHW instruction. +/// +/// \param a +/// A 256-bit vector of [16 x i16] containing source values.
+/// \param imm +/// An immediate 8-bit value specifying which elements to copy from \a a. +/// \a imm[1:0] specifies the index in \a a for elements 4 and 12 of the +/// result, \a imm[3:2] specifies the index for elements 5 and 13, and so +/// forth. Indexes are offset by 4 (so 0 means index 4, and so forth). +/// \returns A 256-bit vector of [16 x i16] containing the result. +#define _mm256_shufflehi_epi16(a, imm) \ + ((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm))) + +/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] \a a +/// according to control information in the integer literal \a imm, and +/// returns the 256-bit [16 x i16] result. The lower 64 bits of each +/// 128-bit half are shuffled; the upper 64 bits of each 128-bit half are +/// copied from \a a unchanged. +/// +/// \code{.operation} +/// result[127:64] := a[127:64] +/// result[255:192] := a[255:192] +/// FOR i := 0 TO 3 +/// j := i * 16 +/// k := (imm >> i*2)[1:0] * 16 +/// result[j+15:j] := a[k+15:k] +/// result[128+j+15:128+j] := a[128+k+15:128+k] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_shufflelo_epi16(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSHUFLW instruction. +/// +/// \param a +/// A 256-bit vector of [16 x i16] to use as a source of data for the +/// result. +/// \param imm +/// An immediate 8-bit value specifying which elements to copy from \a a. +/// \a imm[1:0] specifies the index in \a a for elements 0 and 8 of the +/// result, \a imm[3:2] specifies the index for elements 1 and 9, and so +/// forth. +/// \returns A 256-bit vector of [16 x i16] containing the result. +#define _mm256_shufflelo_epi16(a, imm) \ + ((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm))) + +/// Sets each byte of the result to the corresponding byte of the 256-bit +/// integer vector in \a __a, the negative of that byte, or zero, depending +/// on whether the corresponding byte of the 256-bit integer vector in +/// \a __b is greater than zero, less than zero, or equal to zero, +/// respectively. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGNB instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sign_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b); +} + +/// Sets each element of the result to the corresponding element of the +/// 256-bit vector of [16 x i16] in \a __a, the negative of that element, +/// or zero, depending on whether the corresponding element of the 256-bit +/// vector of [16 x i16] in \a __b is greater than zero, less than zero, or +/// equal to zero, respectively. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGNW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16]. +/// \param __b +/// A 256-bit vector of [16 x i16]. +/// \returns A 256-bit vector of [16 x i16] containing the result.
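Since VPSHUFB indexes within each 128-bit half independently, a control vector that repeats the same 16 indices in both halves gives a per-half byte permutation. A minimal sketch reversing the bytes of each half; reverse_bytes_per_lane is an illustrative name:

#include <immintrin.h>

/* Reverse the byte order within each 128-bit half of v. */
static inline __m256i reverse_bytes_per_lane(__m256i v)
{
    const __m256i idx = _mm256_setr_epi8(
        15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
        15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
    return _mm256_shuffle_epi8(v, idx);
}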
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sign_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b); +} + +/// Sets each element of the result to the corresponding element of the +/// 256-bit vector of [8 x i32] in \a __a, the negative of that element, or +/// zero, depending on whether the corresponding element of the 256-bit +/// vector of [8 x i32] in \a __b is greater than zero, less than zero, or +/// equal to zero, respectively. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGND instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \param __b +/// A 256-bit vector of [8 x i32]. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sign_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b); +} + +/// Shifts each 128-bit half of the 256-bit integer vector \a a left by +/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm +/// is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_slli_si256(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSLLDQ instruction. +/// +/// \param a +/// A 256-bit integer vector to be shifted. +/// \param imm +/// An unsigned immediate value specifying the shift count (in bytes). +/// \returns A 256-bit integer vector containing the result. +#define _mm256_slli_si256(a, imm) \ + ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))) + +/// Shifts each 128-bit half of the 256-bit integer vector \a a left by +/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm +/// is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_bslli_epi128(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSLLDQ instruction. +/// +/// \param a +/// A 256-bit integer vector to be shifted. +/// \param imm +/// An unsigned immediate value specifying the shift count (in bytes). +/// \returns A 256-bit integer vector containing the result. +#define _mm256_bslli_epi128(a, imm) \ + ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))) + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// left by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_slli_epi16(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count); +} + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// left by the number of bits specified by the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLW instruction. 
+/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sll_epi16(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// left by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 31, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_slli_epi32(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// left by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 31, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sll_epi32(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a +/// left by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 63, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_slli_epi64(__m256i __a, int __count) +{ + return __builtin_ia32_psllqi256((__v4di)__a, __count); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a +/// left by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 63, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [4 x i64] containing the result. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sll_epi64(__m256i __a, __m128i __count) +{ + return __builtin_ia32_psllq256((__v4di)__a, __count); +} + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// right by \a __count bits, shifting in sign bits, and returns the result. +/// If \a __count is greater than 15, each element of the result is either +/// 0 or -1 according to the corresponding input sign bit. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srai_epi16(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count); +} + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in sign bits, and returns the result. If \a __count is greater +/// than 15, each element of the result is either 0 or -1 according to the +/// corresponding input sign bit. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sra_epi16(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// right by \a __count bits, shifting in sign bits, and returns the result. +/// If \a __count is greater than 31, each element of the result is either +/// 0 or -1 according to the corresponding input sign bit. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srai_epi32(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in sign bits, and returns the result. If \a __count is greater +/// than 31, each element of the result is either 0 or -1 according to the +/// corresponding input sign bit. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [8 x i32] containing the result. 
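The shift intrinsics above come in pairs: one form takes the count as an int, the other reads it from the low 64 bits of an __m128i, which is how a runtime-variable count is usually supplied. A minimal sketch of both forms for an arithmetic right shift; sar32_by_int and sar32_by_vec are illustrative names:

#include <immintrin.h>

/* Arithmetic right shift of eight signed 32-bit lanes by n bits, expressed
   with a scalar count and with the count placed in an XMM register. */
static inline __m256i sar32_by_int(__m256i v, int n)
{
    return _mm256_srai_epi32(v, n);
}

static inline __m256i sar32_by_vec(__m256i v, int n)
{
    return _mm256_sra_epi32(v, _mm_cvtsi32_si128(n));
}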
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sra_epi32(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count); +} + +/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by +/// \a imm bytes, shifting in zero bytes, and returns the result. If +/// \a imm is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_srli_si256(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSRLDQ instruction. +/// +/// \param a +/// A 256-bit integer vector to be shifted. +/// \param imm +/// An unsigned immediate value specifying the shift count (in bytes). +/// \returns A 256-bit integer vector containing the result. +#define _mm256_srli_si256(a, imm) \ + ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))) + +/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by +/// \a imm bytes, shifting in zero bytes, and returns the result. If +/// \a imm is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_bsrli_epi128(__m256i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPSRLDQ instruction. +/// +/// \param a +/// A 256-bit integer vector to be shifted. +/// \param imm +/// An unsigned immediate value specifying the shift count (in bytes). +/// \returns A 256-bit integer vector containing the result. +#define _mm256_bsrli_epi128(a, imm) \ + ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))) + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// right by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srli_epi16(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count); +} + +/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 15, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srl_epi16(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// right by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 31, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLD instruction. 
+/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srli_epi32(__m256i __a, int __count) +{ + return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 31, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srl_epi32(__m256i __a, __m128i __count) +{ + return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a +/// right by \a __count bits, shifting in zero bits, and returns the result. +/// If \a __count is greater than 63, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __count +/// An unsigned integer value specifying the shift count (in bits). +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srli_epi64(__m256i __a, int __count) +{ + return __builtin_ia32_psrlqi256((__v4di)__a, __count); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a +/// right by the number of bits given in the lower 64 bits of \a __count, +/// shifting in zero bits, and returns the result. If \a __count is greater +/// than 63, the returned result is all zeroes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __count +/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned +/// shift count (in bits). The upper element is ignored. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srl_epi64(__m256i __a, __m128i __count) +{ + return __builtin_ia32_psrlq256((__v4di)__a, __count); +} + +/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors. Returns the lower 8 bits of each difference in the +/// corresponding byte of the 256-bit integer vector result (overflow is +/// ignored). +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := __a[j+7:j] - __b[j+7:j] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing the minuends. +/// \param __b +/// A 256-bit integer vector containing the subtrahends. +/// \returns A 256-bit integer vector containing the differences. 
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)((__v32qu)__a - (__v32qu)__b); +} + +/// Subtracts 16-bit integers from corresponding elements of two 256-bit +/// vectors of [16 x i16]. Returns the lower 16 bits of each difference in +/// the corresponding element of the [16 x i16] result (overflow is +/// ignored). +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := __a[j+15:j] - __b[j+15:j] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing the minuends. +/// \param __b +/// A 256-bit vector of [16 x i16] containing the subtrahends. +/// \returns A 256-bit vector of [16 x i16] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)((__v16hu)__a - (__v16hu)__b); +} + +/// Subtracts 32-bit integers from corresponding elements of two 256-bit +/// vectors of [8 x i32]. Returns the lower 32 bits of each difference in +/// the corresponding element of the [8 x i32] result (overflow is ignored). +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// result[j+31:j] := __a[j+31:j] - __b[j+31:j] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing the minuends. +/// \param __b +/// A 256-bit vector of [8 x i32] containing the subtrahends. +/// \returns A 256-bit vector of [8 x i32] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a - (__v8su)__b); +} + +/// Subtracts 64-bit integers from corresponding elements of two 256-bit +/// vectors of [4 x i64]. Returns the lower 64 bits of each difference in +/// the corresponding element of the [4 x i64] result (overflow is ignored). +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// result[j+63:j] := __a[j+63:j] - __b[j+63:j] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] containing the minuends. +/// \param __b +/// A 256-bit vector of [4 x i64] containing the subtrahends. +/// \returns A 256-bit vector of [4 x i64] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sub_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a - (__v4du)__b); +} + +/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors using signed saturation, and returns each differences in the +/// corresponding byte of the 256-bit integer vector result. +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := SATURATE8(__a[j+7:j] - __b[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBSB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing the minuends. +/// \param __b +/// A 256-bit integer vector containing the subtrahends. +/// \returns A 256-bit integer vector containing the differences. 
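Unsigned saturating subtraction clamps negative differences to zero, so computing it in both directions and OR-ing the results yields a per-byte absolute difference, a common building block alongside VPSADBW. A minimal sketch; absdiff_u8 is an illustrative name:

#include <immintrin.h>

/* Per-byte |a - b| for unsigned bytes. */
static inline __m256i absdiff_u8(__m256i a, __m256i b)
{
    return _mm256_or_si256(_mm256_subs_epu8(a, b), _mm256_subs_epu8(b, a));
}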
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_subs_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b); +} + +/// Subtracts 16-bit integers from corresponding elements of two 256-bit +/// vectors of [16 x i16] using signed saturation, and returns each +/// difference in the corresponding element of the [16 x i16] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+7:j] := SATURATE16(__a[j+7:j] - __b[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing the minuends. +/// \param __b +/// A 256-bit vector of [16 x i16] containing the subtrahends. +/// \returns A 256-bit vector of [16 x i16] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_subs_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b); +} + +/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer +/// vectors using unsigned saturation, and returns each difference in the +/// corresponding byte of the 256-bit integer vector result. For each byte, +/// computes result = __a - __b . +/// +/// \code{.operation} +/// FOR i := 0 TO 31 +/// j := i*8 +/// result[j+7:j] := SATURATE8U(__a[j+7:j] - __b[j+7:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBUSB instruction. +/// +/// \param __a +/// A 256-bit integer vector containing the minuends. +/// \param __b +/// A 256-bit integer vector containing the subtrahends. +/// \returns A 256-bit integer vector containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_subs_epu8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b); +} + +/// Subtracts 16-bit integers from corresponding elements of two 256-bit +/// vectors of [16 x i16] using unsigned saturation, and returns each +/// difference in the corresponding element of the [16 x i16] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 15 +/// j := i*16 +/// result[j+15:j] := SATURATE16U(__a[j+15:j] - __b[j+15:j]) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSUBUSW instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] containing the minuends. +/// \param __b +/// A 256-bit vector of [16 x i16] containing the subtrahends. +/// \returns A 256-bit vector of [16 x i16] containing the differences. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_subs_epu16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b); +} + +/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer +/// vectors in \a __a and \a __b to form the 256-bit result. Specifically, +/// uses the upper 64 bits of each 128-bit half of \a __a and \a __b as +/// input; other bits in these parameters are ignored. +/// +/// \code{.operation} +/// result[7:0] := __a[71:64] +/// result[15:8] := __b[71:64] +/// result[23:16] := __a[79:72] +/// result[31:24] := __b[79:72] +/// . . . +/// result[127:120] := __b[127:120] +/// result[135:128] := __a[199:192] +/// . . . +/// result[255:248] := __b[255:248] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKHBW instruction. 
+/// +/// \param __a +/// A 256-bit integer vector used as the source for the even-numbered bytes +/// of the result. +/// \param __b +/// A 256-bit integer vector used as the source for the odd-numbered bytes +/// of the result. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpackhi_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31); +} + +/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors +/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit +/// vector of [16 x i16]. Specifically, uses the upper 64 bits of each +/// 128-bit half of \a __a and \a __b as input; other bits in these +/// parameters are ignored. +/// +/// \code{.operation} +/// result[15:0] := __a[79:64] +/// result[31:16] := __b[79:64] +/// result[47:32] := __a[95:80] +/// result[63:48] := __b[95:80] +/// . . . +/// result[127:112] := __b[127:112] +/// result[143:128] := __a[211:196] +/// . . . +/// result[255:240] := __b[255:240] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKHWD instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpackhi_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15); +} + +/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors +/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector +/// of [8 x i32]. Specifically, uses the upper 64 bits of each 128-bit half +/// of \a __a and \a __b as input; other bits in these parameters are +/// ignored. +/// +/// \code{.operation} +/// result[31:0] := __a[95:64] +/// result[63:32] := __b[95:64] +/// result[95:64] := __a[127:96] +/// result[127:96] := __b[127:96] +/// result[159:128] := __a[223:192] +/// result[191:160] := __b[223:192] +/// result[223:192] := __a[255:224] +/// result[255:224] := __b[255:224] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKHDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpackhi_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7); +} + +/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors +/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector +/// of [4 x i64]. Specifically, uses the upper 64 bits of each 128-bit half +/// of \a __a and \a __b as input; other bits in these parameters are +/// ignored. 
+/// +/// \code{.operation} +/// result[63:0] := __a[127:64] +/// result[127:64] := __b[127:64] +/// result[191:128] := __a[255:192] +/// result[255:192] := __b[255:192] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKHQDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpackhi_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3); +} + +/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer +/// vectors in \a __a and \a __b to form the 256-bit result. Specifically, +/// uses the lower 64 bits of each 128-bit half of \a __a and \a __b as +/// input; other bits in these parameters are ignored. +/// +/// \code{.operation} +/// result[7:0] := __a[7:0] +/// result[15:8] := __b[7:0] +/// result[23:16] := __a[15:8] +/// result[31:24] := __b[15:8] +/// . . . +/// result[127:120] := __b[63:56] +/// result[135:128] := __a[135:128] +/// . . . +/// result[255:248] := __b[191:184] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKLBW instruction. +/// +/// \param __a +/// A 256-bit integer vector used as the source for the even-numbered bytes +/// of the result. +/// \param __b +/// A 256-bit integer vector used as the source for the odd-numbered bytes +/// of the result. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpacklo_epi8(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23); +} + +/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors +/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit +/// vector of [16 x i16]. Specifically, uses the lower 64 bits of each +/// 128-bit half of \a __a and \a __b as input; other bits in these +/// parameters are ignored. +/// +/// \code{.operation} +/// result[15:0] := __a[15:0] +/// result[31:16] := __b[15:0] +/// result[47:32] := __a[31:16] +/// result[63:48] := __b[31:16] +/// . . . +/// result[127:112] := __b[63:48] +/// result[143:128] := __a[143:128] +/// . . . +/// result[255:239] := __b[191:176] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKLWD instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x i16] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [16 x i16] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpacklo_epi16(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11); +} + +/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors +/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector +/// of [8 x i32]. 
Specifically, uses the lower 64 bits of each 128-bit half +/// of \a __a and \a __b as input; other bits in these parameters are +/// ignored. +/// +/// \code{.operation} +/// result[31:0] := __a[31:0] +/// result[63:32] := __b[31:0] +/// result[95:64] := __a[63:32] +/// result[127:96] := __b[63:32] +/// result[159:128] := __a[159:128] +/// result[191:160] := __b[159:128] +/// result[223:192] := __a[191:160] +/// result[255:224] := __b[191:160] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKLDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpacklo_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5); +} + +/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors +/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector +/// of [4 x i64]. Specifically, uses the lower 64 bits of each 128-bit half +/// of \a __a and \a __b as input; other bits in these parameters are +/// ignored. +/// +/// \code{.operation} +/// result[63:0] := __a[63:0] +/// result[127:64] := __b[63:0] +/// result[191:128] := __a[191:128] +/// result[255:192] := __b[191:128] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPUNPCKLQDQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64] used as the source for the even-numbered +/// elements of the result. +/// \param __b +/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered +/// elements of the result. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_unpacklo_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2); +} + +/// Computes the bitwise XOR of the 256-bit integer vectors in \a __a and +/// \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPXOR instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns A 256-bit integer vector containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_xor_si256(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a ^ (__v4du)__b); +} + +/// Loads the 256-bit integer vector from memory \a __V using a non-temporal +/// memory hint and returns the vector. \a __V must be aligned on a 32-byte +/// boundary. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VMOVNTDQA instruction. +/// +/// \param __V +/// A pointer to the 32-byte aligned memory containing the vector to load. +/// \returns A 256-bit integer vector loaded from memory. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_stream_load_si256(const void *__V) +{ + typedef __v4di __v4di_aligned __attribute__((aligned(32))); + return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V); +} + +/// Broadcasts the 32-bit floating-point value from the low element of the +/// 128-bit vector of [4 x float] in \a __X to all elements of the result's +/// 128-bit vector of [4 x float].
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x float] whose low element will be broadcast.
+/// \returns A 128-bit vector of [4 x float] containing the result.
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_broadcastss_ps(__m128 __X)
+{
+ return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
+}
+
+/// Broadcasts the 64-bit floating-point value from the low element of the
+/// 128-bit vector of [2 x double] in \a __a to both elements of the
+/// result's 128-bit vector of [2 x double].
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c MOVDDUP instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] whose low element will be broadcast.
+/// \returns A 128-bit vector of [2 x double] containing the result.
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_broadcastsd_pd(__m128d __a)
+{
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
+}
+
+/// Broadcasts the 32-bit floating-point value from the low element of the
+/// 128-bit vector of [4 x float] in \a __X to all elements of the
+/// result's 256-bit vector of [8 x float].
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x float] whose low element will be broadcast.
+/// \returns A 256-bit vector of [8 x float] containing the result.
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_broadcastss_ps(__m128 __X)
+{
+ return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the 64-bit floating-point value from the low element of the
+/// 128-bit vector of [2 x double] in \a __X to all elements of the
+/// result's 256-bit vector of [4 x double].
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VBROADCASTSD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x double] whose low element will be broadcast.
+/// \returns A 256-bit vector of [4 x double] containing the result.
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_broadcastsd_pd(__m128d __X)
+{
+ return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
+}
+
+/// Broadcasts the 128-bit integer data from \a __X to both the lower and
+/// upper halves of the 256-bit result.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VBROADCASTI128 instruction.
+///
+/// \param __X
+/// A 128-bit integer vector to be broadcast.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastsi128_si256(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 1, 0, 1);
+}
+
+#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
+
+/// Merges 32-bit integer elements from either of the two 128-bit vectors of
+/// [4 x i32] in \a V1 or \a V2 to the result's 128-bit vector of [4 x i32],
+/// as specified by the immediate integer operand \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF M[i] == 0
+/// result[31+j:j] := V1[31+j:j]
+/// ELSE
+/// result[31+j:j] := V2[31+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// \code
+/// __m128i _mm_blend_epi32(__m128i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDD instruction.
+///
+/// \param V1
+/// A 128-bit vector of [4 x i32] containing source values.
+/// \param V2
+/// A 128-bit vector of [4 x i32] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [3:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \returns A 128-bit vector of [4 x i32] containing the result.
+#define _mm_blend_epi32(V1, V2, M) \
+ ((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
+ (__v4si)(__m128i)(V2), (int)(M)))
+
+/// Merges 32-bit integer elements from either of the two 256-bit vectors of
+/// [8 x i32] in \a V1 or \a V2 to return a 256-bit vector of [8 x i32],
+/// as specified by the immediate integer operand \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF M[i] == 0
+/// result[31+j:j] := V1[31+j:j]
+/// ELSE
+/// result[31+j:j] := V2[31+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile
+///
+/// \code
+/// __m256i _mm256_blend_epi32(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDD instruction.
+///
+/// \param V1
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param V2
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [7:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
+#define _mm256_blend_epi32(V1, V2, M) \
+ ((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), (int)(M)))
+
+/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
+/// bytes of the 256-bit result.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
+///
+/// \param __X
+/// A 128-bit integer vector whose low byte will be broadcast.
+/// \returns A 256-bit integer vector containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastb_epi8(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [8 x i16] in \a __X
+/// to all elements of the result's 256-bit vector of [16 x i16].
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
+///
+/// \param __X
+/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_broadcastw_epi16(__m128i __X)
+{
+ return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+}
+
+/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
+/// to all elements of the result's 256-bit vector of [8 x i32].
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
+/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcastd_epi32(__m128i __X) +{ + return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0); +} + +/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X +/// to all elements of the result's 256-bit vector of [4 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTQ instruction. +/// +/// \param __X +/// A 128-bit vector of [2 x i64] whose low element will be broadcast. +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcastq_epi64(__m128i __X) +{ + return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0); +} + +/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all +/// bytes of the 128-bit result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTB instruction. +/// +/// \param __X +/// A 128-bit integer vector whose low byte will be broadcast. +/// \returns A 128-bit integer vector containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastb_epi8(__m128i __X) +{ + return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +/// Broadcasts the low element from the 128-bit vector of [8 x i16] in +/// \a __X to all elements of the result's 128-bit vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTW instruction. +/// +/// \param __X +/// A 128-bit vector of [8 x i16] whose low element will be broadcast. +/// \returns A 128-bit vector of [8 x i16] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastw_epi16(__m128i __X) +{ + return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0); +} + +/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X +/// to all elements of the result's vector of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTD instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] whose low element will be broadcast. +/// \returns A 128-bit vector of [4 x i32] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastd_epi32(__m128i __X) +{ + return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0); +} + +/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X +/// to both elements of the result's 128-bit vector of [2 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPBROADCASTQ instruction. +/// +/// \param __X +/// A 128-bit vector of [2 x i64] whose low element will be broadcast. +/// \returns A 128-bit vector of [2 x i64] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastq_epi64(__m128i __X) +{ + return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0); +} + +/// Sets the result's 256-bit vector of [8 x i32] to copies of elements of the +/// 256-bit vector of [8 x i32] in \a __a as specified by indexes in the +/// elements of the 256-bit vector of [8 x i32] in \a __b. 
+/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// k := __b[j+2:j] * 32 +/// result[j+31:j] := __a[k+31:k] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPERMD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32] containing the source values. +/// \param __b +/// A 256-bit vector of [8 x i32] containing indexes of values to use from +/// \a __a. +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b); +} + +/// Sets the result's 256-bit vector of [4 x double] to copies of elements of +/// the 256-bit vector of [4 x double] in \a V as specified by the +/// immediate value \a M. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// k := (M >> i*2)[1:0] * 64 +/// result[j+63:j] := V[k+63:k] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_permute4x64_pd(__m256d V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPERMPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double] containing the source values. +/// \param M +/// An immediate 8-bit value specifying which elements to copy from \a V. +/// \a M[1:0] specifies the index in \a a for element 0 of the result, +/// \a M[3:2] specifies the index for element 1, and so forth. +/// \returns A 256-bit vector of [4 x double] containing the result. +#define _mm256_permute4x64_pd(V, M) \ + ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M))) + +/// Sets the result's 256-bit vector of [8 x float] to copies of elements of +/// the 256-bit vector of [8 x float] in \a __a as specified by indexes in +/// the elements of the 256-bit vector of [8 x i32] in \a __b. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// k := __b[j+2:j] * 32 +/// result[j+31:j] := __a[k+31:k] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPERMPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the source values. +/// \param __b +/// A 256-bit vector of [8 x i32] containing indexes of values to use from +/// \a __a. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_permutevar8x32_ps(__m256 __a, __m256i __b) +{ + return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b); +} + +/// Sets the result's 256-bit vector of [4 x i64] result to copies of elements +/// of the 256-bit vector of [4 x i64] in \a V as specified by the +/// immediate value \a M. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// k := (M >> i*2)[1:0] * 64 +/// result[j+63:j] := V[k+63:k] +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_permute4x64_epi64(__m256i V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPERMQ instruction. +/// +/// \param V +/// A 256-bit vector of [4 x i64] containing the source values. +/// \param M +/// An immediate 8-bit value specifying which elements to copy from \a V. +/// \a M[1:0] specifies the index in \a a for element 0 of the result, +/// \a M[3:2] specifies the index for element 1, and so forth. +/// \returns A 256-bit vector of [4 x i64] containing the result. 
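+///
+/// A minimal usage sketch, for illustration only; the values below are
+/// hypothetical and \c _mm256_set_epi64x comes from the companion AVX
+/// headers:
+/// \code
+/// __m256i v = _mm256_set_epi64x(33, 22, 11, 0); // lanes 3..0 = 33, 22, 11, 0
+/// __m256i r = _mm256_permute4x64_epi64(v, 0x1B); // M = 0b00011011 reverses the lanes
+/// // r now holds lanes 3..0 = 0, 11, 22, 33
+/// \endcode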
+#define _mm256_permute4x64_epi64(V, M) \ + ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M))) + +/// Sets each half of the 256-bit result either to zero or to one of the +/// four possible 128-bit halves of the 256-bit vectors \a V1 and \a V2, +/// as specified by the immediate value \a M. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*128 +/// k := M >> (i*4) +/// IF k[3] == 0 +/// CASE (k[1:0]) OF +/// 0: result[127+j:j] := V1[127:0] +/// 1: result[127+j:j] := V1[255:128] +/// 2: result[127+j:j] := V2[127:0] +/// 3: result[127+j:j] := V2[255:128] +/// ESAC +/// ELSE +/// result[127+j:j] := 0 +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_permute2x128_si256(__m256i V1, __m256i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPERM2I128 instruction. +/// +/// \param V1 +/// A 256-bit integer vector containing source values. +/// \param V2 +/// A 256-bit integer vector containing source values. +/// \param M +/// An immediate value specifying how to form the result. Bits [3:0] +/// control the lower half of the result, bits [7:4] control the upper half. +/// Within each 4-bit control value, if bit 3 is 1, the result is zero, +/// otherwise bits [1:0] determine the source as follows. \n +/// 0: the lower half of \a V1 \n +/// 1: the upper half of \a V1 \n +/// 2: the lower half of \a V2 \n +/// 3: the upper half of \a V2 +/// \returns A 256-bit integer vector containing the result. +#define _mm256_permute2x128_si256(V1, V2, M) \ + ((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M))) + +/// Extracts half of the 256-bit vector \a V to the 128-bit result. If bit 0 +/// of the immediate \a M is zero, extracts the lower half of the result; +/// otherwise, extracts the upper half. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_extracti128_si256(__m256i V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VEXTRACTI128 instruction. +/// +/// \param V +/// A 256-bit integer vector containing the source values. +/// \param M +/// An immediate value specifying which half of \a V to extract. +/// \returns A 128-bit integer vector containing the result. +#define _mm256_extracti128_si256(V, M) \ + ((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M))) + +/// Copies the 256-bit vector \a V1 to the result, then overwrites half of the +/// result with the 128-bit vector \a V2. If bit 0 of the immediate \a M +/// is zero, overwrites the lower half of the result; otherwise, +/// overwrites the upper half. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_inserti128_si256(__m256i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c VINSERTI128 instruction. +/// +/// \param V1 +/// A 256-bit integer vector containing a source value. +/// \param V2 +/// A 128-bit integer vector containing a source value. +/// \param M +/// An immediate value specifying where to put \a V2 in the result. +/// \returns A 256-bit integer vector containing the result. +#define _mm256_inserti128_si256(V1, V2, M) \ + ((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \ + (__v2di)(__m128i)(V2), (int)(M))) + +/// Conditionally loads eight 32-bit integer elements from memory \a __X, if +/// the most significant bit of the corresponding element in the mask +/// \a __M is set; otherwise, sets that element of the result to zero. +/// Returns the 256-bit [8 x i32] result. 
+/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// IF __M[j+31] == 1 +/// result[j+31:j] := Load32(__X+(i*4)) +/// ELSE +/// result[j+31:j] := 0 +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVD instruction. +/// +/// \param __X +/// A pointer to the memory used for loading values. +/// \param __M +/// A 256-bit vector of [8 x i32] containing the mask bits. +/// \returns A 256-bit vector of [8 x i32] containing the loaded or zeroed +/// elements. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskload_epi32(int const *__X, __m256i __M) +{ + return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M); +} + +/// Conditionally loads four 64-bit integer elements from memory \a __X, if +/// the most significant bit of the corresponding element in the mask +/// \a __M is set; otherwise, sets that element of the result to zero. +/// Returns the 256-bit [4 x i64] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// IF __M[j+63] == 1 +/// result[j+63:j] := Load64(__X+(i*8)) +/// ELSE +/// result[j+63:j] := 0 +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVQ instruction. +/// +/// \param __X +/// A pointer to the memory used for loading values. +/// \param __M +/// A 256-bit vector of [4 x i64] containing the mask bits. +/// \returns A 256-bit vector of [4 x i64] containing the loaded or zeroed +/// elements. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskload_epi64(long long const *__X, __m256i __M) +{ + return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M); +} + +/// Conditionally loads four 32-bit integer elements from memory \a __X, if +/// the most significant bit of the corresponding element in the mask +/// \a __M is set; otherwise, sets that element of the result to zero. +/// Returns the 128-bit [4 x i32] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*32 +/// IF __M[j+31] == 1 +/// result[j+31:j] := Load32(__X+(i*4)) +/// ELSE +/// result[j+31:j] := 0 +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVD instruction. +/// +/// \param __X +/// A pointer to the memory used for loading values. +/// \param __M +/// A 128-bit vector of [4 x i32] containing the mask bits. +/// \returns A 128-bit vector of [4 x i32] containing the loaded or zeroed +/// elements. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskload_epi32(int const *__X, __m128i __M) +{ + return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M); +} + +/// Conditionally loads two 64-bit integer elements from memory \a __X, if +/// the most significant bit of the corresponding element in the mask +/// \a __M is set; otherwise, sets that element of the result to zero. +/// Returns the 128-bit [2 x i64] result. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*64 +/// IF __M[j+63] == 1 +/// result[j+63:j] := Load64(__X+(i*8)) +/// ELSE +/// result[j+63:j] := 0 +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVQ instruction. +/// +/// \param __X +/// A pointer to the memory used for loading values. +/// \param __M +/// A 128-bit vector of [2 x i64] containing the mask bits. +/// \returns A 128-bit vector of [2 x i64] containing the loaded or zeroed +/// elements. 
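+///
+/// An illustrative sketch only; the array and mask values are made up, and
+/// \c _mm_set_epi64x comes from the companion SSE2 headers:
+/// \code
+/// long long data[2] = {10, 20};
+/// __m128i mask = _mm_set_epi64x(0, -1); // load lane 0 only; lane 1 is zeroed
+/// __m128i r = _mm_maskload_epi64(data, mask); // lane 0 = 10, lane 1 = 0
+/// \endcode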
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskload_epi64(long long const *__X, __m128i __M) +{ + return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M); +} + +/// Conditionally stores eight 32-bit integer elements from the 256-bit vector +/// of [8 x i32] in \a __Y to memory \a __X, if the most significant bit of +/// the corresponding element in the mask \a __M is set; otherwise, the +/// memory element is unchanged. +/// +/// \code{.operation} +/// FOR i := 0 TO 7 +/// j := i*32 +/// IF __M[j+31] == 1 +/// Store32(__X+(i*4), __Y[j+31:j]) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVD instruction. +/// +/// \param __X +/// A pointer to the memory used for storing values. +/// \param __M +/// A 256-bit vector of [8 x i32] containing the mask bits. +/// \param __Y +/// A 256-bit vector of [8 x i32] containing the values to store. +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y) +{ + __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y); +} + +/// Conditionally stores four 64-bit integer elements from the 256-bit vector +/// of [4 x i64] in \a __Y to memory \a __X, if the most significant bit of +/// the corresponding element in the mask \a __M is set; otherwise, the +/// memory element is unchanged. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*64 +/// IF __M[j+63] == 1 +/// Store64(__X+(i*8), __Y[j+63:j]) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVQ instruction. +/// +/// \param __X +/// A pointer to the memory used for storing values. +/// \param __M +/// A 256-bit vector of [4 x i64] containing the mask bits. +/// \param __Y +/// A 256-bit vector of [4 x i64] containing the values to store. +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y) +{ + __builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y); +} + +/// Conditionally stores four 32-bit integer elements from the 128-bit vector +/// of [4 x i32] in \a __Y to memory \a __X, if the most significant bit of +/// the corresponding element in the mask \a __M is set; otherwise, the +/// memory element is unchanged. +/// +/// \code{.operation} +/// FOR i := 0 TO 3 +/// j := i*32 +/// IF __M[j+31] == 1 +/// Store32(__X+(i*4), __Y[j+31:j]) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVD instruction. +/// +/// \param __X +/// A pointer to the memory used for storing values. +/// \param __M +/// A 128-bit vector of [4 x i32] containing the mask bits. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing the values to store. +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y) +{ + __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y); +} + +/// Conditionally stores two 64-bit integer elements from the 128-bit vector +/// of [2 x i64] in \a __Y to memory \a __X, if the most significant bit of +/// the corresponding element in the mask \a __M is set; otherwise, the +/// memory element is unchanged. +/// +/// \code{.operation} +/// FOR i := 0 TO 1 +/// j := i*64 +/// IF __M[j+63] == 1 +/// Store64(__X+(i*8), __Y[j+63:j]) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMASKMOVQ instruction. 
+/// +/// \param __X +/// A pointer to the memory used for storing values. +/// \param __M +/// A 128-bit vector of [2 x i64] containing the mask bits. +/// \param __Y +/// A 128-bit vector of [2 x i64] containing the values to store. +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y) +{ + __builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X +/// left by the number of bits given in the corresponding element of the +/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLVD instruction. +/// +/// \param __X +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __Y +/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sllv_epi32(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y); +} + +/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X +/// left by the number of bits given in the corresponding element of the +/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLVD instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] to be shifted. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 128-bit vector of [4 x i32] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_sllv_epi32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X +/// left by the number of bits given in the corresponding element of the +/// 128-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 63, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLVQ instruction. +/// +/// \param __X +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __Y +/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in +/// bits). +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sllv_epi64(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y); +} + +/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X +/// left by the number of bits given in the corresponding element of the +/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 63, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSLLVQ instruction. +/// +/// \param __X +/// A 128-bit vector of [2 x i64] to be shifted. 
+/// \param __Y +/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in +/// bits). +/// \returns A 128-bit vector of [2 x i64] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_sllv_epi64(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 256-bit vector of [8 x i32] in \a __Y, shifting in sign bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is 0 or -1 according to the sign bit +/// for that element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAVD instruction. +/// +/// \param __X +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __Y +/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srav_epi32(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y); +} + +/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 128-bit vector of [4 x i32] in \a __Y, shifting in sign bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is 0 or -1 according to the sign bit +/// for that element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRAVD instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] to be shifted. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 128-bit vector of [4 x i32] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srav_epi32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y); +} + +/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLVD instruction. +/// +/// \param __X +/// A 256-bit vector of [8 x i32] to be shifted. +/// \param __Y +/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 256-bit vector of [8 x i32] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srlv_epi32(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y); +} + +/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 31, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLVD instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] to be shifted. 
+/// \param __Y +/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in +/// bits). +/// \returns A 128-bit vector of [4 x i32] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srlv_epi32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y); +} + +/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 128-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 63, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLVQ instruction. +/// +/// \param __X +/// A 256-bit vector of [4 x i64] to be shifted. +/// \param __Y +/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in +/// bits). +/// \returns A 256-bit vector of [4 x i64] containing the result. +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srlv_epi64(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y); +} + +/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X +/// right by the number of bits given in the corresponding element of the +/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and +/// returns the result. If the shift count for any element is greater than +/// 63, the result for that element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSRLVQ instruction. +/// +/// \param __X +/// A 128-bit vector of [2 x i64] to be shifted. +/// \param __Y +/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in +/// bits). +/// \returns A 128-bit vector of [2 x i64] containing the result. +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srlv_epi64(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y); +} + +/// Conditionally gathers two 64-bit floating-point values, either from the +/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector +/// of [2 x double] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*32 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_mask_i32gather_pd(__m128d a, const double *m, __m128i i, +/// __m128d mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only +/// the first two elements are used. +/// \param mask +/// A 128-bit vector of [2 x double] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. 
+/// \returns A 128-bit vector of [2 x double] containing the gathered values. +#define _mm_mask_i32gather_pd(a, m, i, mask, s) \ + ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2df)(__m128d)(mask), (s))) + +/// Conditionally gathers four 64-bit floating-point values, either from the +/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector +/// of [4 x double] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*32 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_mask_i32gather_pd(__m256d a, const double *m, __m128i i, +/// __m256d mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [4 x double] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x double] containing the gathered values. +#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \ + ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4df)(__m256d)(mask), (s))) + +/// Conditionally gathers two 64-bit floating-point values, either from the +/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector +/// of [2 x double] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*64 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_mask_i64gather_pd(__m128d a, const double *m, __m128i i, +/// __m128d mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [2 x double] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x double] containing the gathered values. 
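+///
+/// A usage sketch for illustration only; the table, indexes, and mask are
+/// hypothetical, and the setter/cast intrinsics come from the companion
+/// SSE2 headers:
+/// \code
+/// double table[4] = {0.0, 1.0, 2.0, 3.0};
+/// __m128i idx = _mm_set_epi64x(3, 1); // gather table[1] and table[3]
+/// __m128d src = _mm_setzero_pd();
+/// __m128d mask = _mm_castsi128_pd(_mm_set1_epi64x(-1)); // gather both lanes
+/// __m128d r = _mm_mask_i64gather_pd(src, table, idx, mask, 8);
+/// // lane 0 = table[1] = 1.0, lane 1 = table[3] = 3.0
+/// \endcode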
+#define _mm_mask_i64gather_pd(a, m, i, mask, s) \ + ((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \ + (double const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2df)(__m128d)(mask), (s))) + +/// Conditionally gathers four 64-bit floating-point values, either from the +/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector +/// of [4 x double] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*64 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_mask_i64gather_pd(__m256d a, const double *m, __m256i i, +/// __m256d mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [4 x double] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x double] containing the gathered values. +#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \ + ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \ + (double const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4df)(__m256d)(mask), (s))) + +/// Conditionally gathers four 32-bit floating-point values, either from the +/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector +/// of [4 x float] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*32 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_mask_i32gather_ps(__m128 a, const float *m, __m128i i, +/// __m128 mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x float] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x float] containing the gathered values. 
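+///
+/// For illustration only, with hypothetical data; the setter/cast intrinsics
+/// come from the companion SSE headers:
+/// \code
+/// float table[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+/// __m128i idx = _mm_set_epi32(6, 4, 2, 0); // gather every other entry
+/// __m128 src = _mm_setzero_ps();
+/// __m128 mask = _mm_castsi128_ps(_mm_set1_epi32(-1)); // gather all four lanes
+/// __m128 r = _mm_mask_i32gather_ps(src, table, idx, mask, 4);
+/// // r = {0.0f, 2.0f, 4.0f, 6.0f} in lanes 0..3
+/// \endcode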
+#define _mm_mask_i32gather_ps(a, m, i, mask, s) \ + ((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \ + (float const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4sf)(__m128)(mask), (s))) + +/// Conditionally gathers eight 32-bit floating-point values, either from the +/// 256-bit vector of [8 x float] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector +/// of [8 x float] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 7 +/// j := element*32 +/// k := element*32 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_mask_i32gather_ps(__m256 a, const float *m, __m256i i, +/// __m256 mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPS instruction. +/// +/// \param a +/// A 256-bit vector of [8 x float] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [8 x i32] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [8 x float] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [8 x float] containing the gathered values. +#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \ + ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \ + (float const *)(m), \ + (__v8si)(__m256i)(i), \ + (__v8sf)(__m256)(mask), (s))) + +/// Conditionally gathers two 32-bit floating-point values, either from the +/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector +/// of [4 x float] in \a mask determines the source for the lower two +/// elements. The upper two elements of the result are zeroed. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*32 +/// k := element*64 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// result[127:64] := 0 +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_mask_i64gather_ps(__m128 a, const float *m, __m128i i, +/// __m128 mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float] used as the source when a mask bit is +/// zero. Only the first two elements are used. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x float] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. Only the first +/// two elements are used. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. 
+/// \returns A 128-bit vector of [4 x float] containing the gathered values. +#define _mm_mask_i64gather_ps(a, m, i, mask, s) \ + ((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \ + (float const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v4sf)(__m128)(mask), (s))) + +/// Conditionally gathers four 32-bit floating-point values, either from the +/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector +/// of [4 x float] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*64 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm256_mask_i64gather_ps(__m128 a, const float *m, __m256i i, +/// __m128 mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x float] containing the mask. The most +/// significant bit of each element in the mask vector represents the mask +/// bits. If a mask bit is zero, the corresponding value from vector \a a +/// is gathered; otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x float] containing the gathered values. +#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \ + ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \ + (float const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4sf)(__m128)(mask), (s))) + +/// Conditionally gathers four 32-bit integer values, either from the +/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector +/// of [4 x i32] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*32 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_mask_i32gather_epi32(__m128i a, const int *m, __m128i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDD instruction. +/// +/// \param a +/// A 128-bit vector of [4 x i32] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x i32] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. 
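+///
+/// A short sketch, for illustration only (the table and mask values are
+/// arbitrary; the setter intrinsics come from the companion SSE2 headers):
+/// \code
+/// int table[8] = {10, 11, 12, 13, 14, 15, 16, 17};
+/// __m128i idx = _mm_set_epi32(7, 5, 3, 1);
+/// __m128i src = _mm_setzero_si128();
+/// __m128i mask = _mm_set_epi32(-1, 0, -1, 0); // gather lanes 1 and 3 only
+/// __m128i r = _mm_mask_i32gather_epi32(src, table, idx, mask, 4);
+/// // lanes 1 and 3 come from table[3] and table[7]; lanes 0 and 2 keep src (0)
+/// \endcode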
+#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \ + (int const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4si)(__m128i)(mask), (s))) + +/// Conditionally gathers eight 32-bit integer values, either from the +/// 256-bit vector of [8 x i32] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector +/// of [8 x i32] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 7 +/// j := element*32 +/// k := element*32 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_mask_i32gather_epi32(__m256i a, const int *m, __m256i i, +/// __m256i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDD instruction. +/// +/// \param a +/// A 256-bit vector of [8 x i32] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [8 x i32] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [8 x i32] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [8 x i32] containing the gathered values. +#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \ + ((__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \ + (int const *)(m), \ + (__v8si)(__m256i)(i), \ + (__v8si)(__m256i)(mask), (s))) + +/// Conditionally gathers two 32-bit integer values, either from the +/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector +/// of [4 x i32] in \a mask determines the source for the lower two +/// elements. The upper two elements of the result are zeroed. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*32 +/// k := element*64 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// result[127:64] := 0 +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_mask_i64gather_epi32(__m128i a, const int *m, __m128i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQD instruction. +/// +/// \param a +/// A 128-bit vector of [4 x i32] used as the source when a mask bit is +/// zero. Only the first two elements are used. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x i32] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. Only the first two elements +/// are used. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. 
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values. +#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \ + (int const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v4si)(__m128i)(mask), (s))) + +/// Conditionally gathers four 32-bit integer values, either from the +/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector +/// of [4 x i32] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*64 +/// IF mask[j+31] == 0 +/// result[j+31:j] := a[j+31:j] +/// ELSE +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_mask_i64gather_epi32(__m128i a, const int *m, __m256i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQD instruction. +/// +/// \param a +/// A 128-bit vector of [4 x i32] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [4 x i32] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. +#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \ + (int const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4si)(__m128i)(mask), (s))) + +/// Conditionally gathers two 64-bit integer values, either from the +/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector +/// of [2 x i64] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*32 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_mask_i32gather_epi64(__m128i a, const long long *m, __m128i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDQ instruction. +/// +/// \param a +/// A 128-bit vector of [2 x i64] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only +/// the first two elements are used. +/// \param mask +/// A 128-bit vector of [2 x i64] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x i64] containing the gathered values. 
+#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2di)(__m128i)(mask), (s))) + +/// Conditionally gathers four 64-bit integer values, either from the +/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector +/// of [4 x i64] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*32 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_mask_i32gather_epi64(__m256i a, const long long *m, +/// __m128i i, __m256i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDQ instruction. +/// +/// \param a +/// A 256-bit vector of [4 x i64] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [4 x i64] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x i64] containing the gathered values. +#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \ + ((__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4di)(__m256i)(mask), (s))) + +/// Conditionally gathers two 64-bit integer values, either from the +/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector +/// of [2 x i64] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*64 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_mask_i64gather_epi64(__m128i a, const long long *m, __m128i i, +/// __m128i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQQ instruction. +/// +/// \param a +/// A 128-bit vector of [2 x i64] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param mask +/// A 128-bit vector of [2 x i64] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x i64] containing the gathered values. 
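+///
+/// For illustration only, with a hypothetical lookup table; the setter
+/// intrinsics come from the companion SSE2 headers:
+/// \code
+/// long long table[4] = {5, 6, 7, 8};
+/// __m128i idx = _mm_set_epi64x(3, 0); // gather table[0] and table[3]
+/// __m128i src = _mm_setzero_si128();
+/// __m128i mask = _mm_set1_epi64x(-1); // gather both lanes
+/// __m128i r = _mm_mask_i64gather_epi64(src, table, idx, mask, 8);
+/// // lane 0 = table[0] = 5, lane 1 = table[3] = 8
+/// \endcode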
+#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \ + ((__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \ + (long long const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2di)(__m128i)(mask), (s))) + +/// Conditionally gathers four 64-bit integer values, either from the +/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector +/// of [4 x i64] in \a mask determines the source for each element. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*64 +/// IF mask[j+63] == 0 +/// result[j+63:j] := a[j+63:j] +/// ELSE +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// FI +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_mask_i64gather_epi64(__m256i a, const long long *m, +/// __m256i i, __m256i mask, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQQ instruction. +/// +/// \param a +/// A 256-bit vector of [4 x i64] used as the source when a mask bit is +/// zero. +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param mask +/// A 256-bit vector of [4 x i64] containing the mask. The most significant +/// bit of each element in the mask vector represents the mask bits. If a +/// mask bit is zero, the corresponding value from vector \a a is gathered; +/// otherwise the value is loaded from memory. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x i64] containing the gathered values. +#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \ + ((__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \ + (long long const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4di)(__m256i)(mask), (s))) + +/// Gathers two 64-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*32 +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_i32gather_pd(const double *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only +/// the first two elements are used. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x double] containing the gathered values. +#define _mm_i32gather_pd(m, i, s) \ + ((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \ + _mm_setzero_pd()), \ + (s))) + +/// Gathers four 64-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. 
+/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*32 +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_i32gather_pd(const double *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x double] containing the gathered values. +#define _mm256_i32gather_pd(m, i, s) \ + ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \ + (double const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \ + _mm256_setzero_pd(), \ + _CMP_EQ_OQ), \ + (s))) + +/// Gathers two 64-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*64 +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_i64gather_pd(const double *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x double] containing the gathered values. +#define _mm_i64gather_pd(m, i, s) \ + ((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \ + (double const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \ + _mm_setzero_pd()), \ + (s))) + +/// Gathers four 64-bit floating-point values from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*64 +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_i64gather_pd(const double *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x double] containing the gathered values. +#define _mm256_i64gather_pd(m, i, s) \ + ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \ + (double const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \ + _mm256_setzero_pd(), \ + _CMP_EQ_OQ), \ + (s))) + +/// Gathers four 32-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. 
+/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*32 +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_i32gather_ps(const float *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPS instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x float] containing the gathered values. +#define _mm_i32gather_ps(m, i, s) \ + ((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \ + (float const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ + _mm_setzero_ps()), \ + (s))) + +/// Gathers eight 32-bit floating-point values from memory \a m using scaled +/// indexes from the 256-bit vector of [8 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 7 +/// j := element*32 +/// k := element*32 +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_i32gather_ps(const float *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERDPS instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [8 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [8 x float] containing the gathered values. +#define _mm256_i32gather_ps(m, i, s) \ + ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \ + (float const *)(m), \ + (__v8si)(__m256i)(i), \ + (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \ + _mm256_setzero_ps(), \ + _CMP_EQ_OQ), \ + (s))) + +/// Gathers two 32-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [2 x i64] in \a i. The upper two +/// elements of the result are zeroed. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*32 +/// k := element*64 +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// result[127:64] := 0 +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_i64gather_ps(const float *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPS instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x float] containing the gathered values. +#define _mm_i64gather_ps(m, i, s) \ + ((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \ + (float const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ + _mm_setzero_ps()), \ + (s))) + +/// Gathers four 32-bit floating-point values from memory \a m using scaled +/// indexes from the 256-bit vector of [4 x i64] in \a i. 
+/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*64 +/// result[j+31:j] := Load32(m + SignExtend(i[k+64:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128 _mm256_i64gather_ps(const float *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VGATHERQPS instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x float] containing the gathered values. +#define _mm256_i64gather_ps(m, i, s) \ + ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \ + (float const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \ + _mm_setzero_ps()), \ + (s))) + +/// Gathers four 32-bit floating-point values from memory \a m using scaled +/// indexes from the 128-bit vector of [4 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*32 +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_i32gather_epi32(const int *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. +#define _mm_i32gather_epi32(m, i, s) \ + ((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \ + (int const *)(m), (__v4si)(__m128i)(i), \ + (__v4si)_mm_set1_epi32(-1), (s))) + +/// Gathers eight 32-bit floating-point values from memory \a m using scaled +/// indexes from the 256-bit vector of [8 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 7 +/// j := element*32 +/// k := element*32 +/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_i32gather_epi32(const int *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [8 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [8 x i32] containing the gathered values. +#define _mm256_i32gather_epi32(m, i, s) \ + ((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \ + (int const *)(m), (__v8si)(__m256i)(i), \ + (__v8si)_mm256_set1_epi32(-1), (s))) + +/// Gathers two 32-bit integer values from memory \a m using scaled indexes +/// from the 128-bit vector of [2 x i64] in \a i. The upper two elements +/// of the result are zeroed. 
+/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*32 +/// k := element*64 +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// result[127:64] := 0 +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_i64gather_epi32(const int *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. +#define _mm_i64gather_epi32(m, i, s) \ + ((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \ + (int const *)(m), (__v2di)(__m128i)(i), \ + (__v4si)_mm_set1_epi32(-1), (s))) + +/// Gathers four 32-bit integer values from memory \a m using scaled indexes +/// from the 256-bit vector of [4 x i64] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*32 +/// k := element*64 +/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_i64gather_epi32(const int *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQD instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [4 x i32] containing the gathered values. +#define _mm256_i64gather_epi32(m, i, s) \ + ((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \ + (int const *)(m), (__v4di)(__m256i)(i), \ + (__v4si)_mm_set1_epi32(-1), (s))) + +/// Gathers two 64-bit integer values from memory \a m using scaled indexes +/// from the 128-bit vector of [4 x i32] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*32 +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_i32gather_epi64(const long long *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDQ instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only +/// the first two elements are used. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x i64] containing the gathered values. +#define _mm_i32gather_epi64(m, i, s) \ + ((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v2di)_mm_set1_epi64x(-1), (s))) + +/// Gathers four 64-bit integer values from memory \a m using scaled indexes +/// from the 128-bit vector of [4 x i32] in \a i. 
+/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*32 +/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_i32gather_epi64(const long long *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERDQ instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x i64] containing the gathered values. +#define _mm256_i32gather_epi64(m, i, s) \ + ((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \ + (long long const *)(m), \ + (__v4si)(__m128i)(i), \ + (__v4di)_mm256_set1_epi64x(-1), (s))) + +/// Gathers two 64-bit integer values from memory \a m using scaled indexes +/// from the 128-bit vector of [2 x i64] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 1 +/// j := element*64 +/// k := element*64 +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_i64gather_epi64(const long long *m, __m128i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQQ instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 128-bit vector of [2 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 128-bit vector of [2 x i64] containing the gathered values. +#define _mm_i64gather_epi64(m, i, s) \ + ((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \ + (long long const *)(m), \ + (__v2di)(__m128i)(i), \ + (__v2di)_mm_set1_epi64x(-1), (s))) + +/// Gathers four 64-bit integer values from memory \a m using scaled indexes +/// from the 256-bit vector of [4 x i64] in \a i. +/// +/// \code{.operation} +/// FOR element := 0 to 3 +/// j := element*64 +/// k := element*64 +/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s) +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_i64gather_epi64(const long long *m, __m256i i, const int s); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPGATHERQQ instruction. +/// +/// \param m +/// A pointer to the memory used for loading values. +/// \param i +/// A 256-bit vector of [4 x i64] containing signed indexes into \a m. +/// \param s +/// A literal constant scale factor for the indexes in \a i. Must be +/// 1, 2, 4, or 8. +/// \returns A 256-bit vector of [4 x i64] containing the gathered values. +#define _mm256_i64gather_epi64(m, i, s) \ + ((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \ + (long long const *)(m), \ + (__v4di)(__m256i)(i), \ + (__v4di)_mm256_set1_epi64x(-1), (s))) + +#undef __DEFAULT_FN_ATTRS256 +#undef __DEFAULT_FN_ATTRS128 + +#endif /* __AVX2INTRIN_H */ diff --git a/third_party/intel/clang/avx512bf16intrin.h b/third_party/intel/clang/avx512bf16intrin.h new file mode 100644 index 000000000..b28d2e243 --- /dev/null +++ b/third_party/intel/clang/avx512bf16intrin.h @@ -0,0 +1,283 @@ +/*===------------ avx512bf16intrin.h - AVX512_BF16 intrinsics --------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+ * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifdef __SSE2__ + +#ifndef __AVX512BF16INTRIN_H +#define __AVX512BF16INTRIN_H + +typedef __bf16 __v32bf __attribute__((__vector_size__(64), __aligned__(64))); +typedef __bf16 __m512bh __attribute__((__vector_size__(64), __aligned__(64))); +typedef __bf16 __bfloat16 __attribute__((deprecated("use __bf16 instead"))); + +#define __DEFAULT_FN_ATTRS512 \ + __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16,evex512"), \ + __min_vector_width__(512))) +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512bf16,no-evex512"))) + +/// Convert One BF16 Data to One Single Float Data. +/// +/// \headerfile +/// +/// This intrinsic does not correspond to a specific instruction. +/// +/// \param __A +/// A bfloat data. +/// \returns A float data whose sign field and exponent field keep unchanged, +/// and fraction field is extended to 23 bits. +static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtsbh_ss(__bf16 __A) { + return __builtin_ia32_cvtsbf162ss_32(__A); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 512-bit vector of [16 x float]. +/// \param __B +/// A 512-bit vector of [16 x float]. +/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from +/// conversion of __B, and higher 256 bits come from conversion of __A. +static __inline__ __m512bh __DEFAULT_FN_ATTRS512 +_mm512_cvtne2ps_pbh(__m512 __A, __m512 __B) { + return (__m512bh)__builtin_ia32_cvtne2ps2bf16_512((__v16sf) __A, + (__v16sf) __B); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 512-bit vector of [16 x float]. +/// \param __B +/// A 512-bit vector of [16 x float]. +/// \param __W +/// A 512-bit vector of [32 x bfloat]. +/// \param __U +/// A 32-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element from __W. +/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from +/// conversion of __B, and higher 256 bits come from conversion of __A. +static __inline__ __m512bh __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtne2ps_pbh(__m512bh __W, __mmask32 __U, __m512 __A, __m512 __B) { + return (__m512bh)__builtin_ia32_selectpbf_512((__mmask32)__U, + (__v32bf)_mm512_cvtne2ps_pbh(__A, __B), + (__v32bf)__W); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 512-bit vector of [16 x float]. +/// \param __B +/// A 512-bit vector of [16 x float]. +/// \param __U +/// A 32-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element is zero. +/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from +/// conversion of __B, and higher 256 bits come from conversion of __A. 
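As a rough usage sketch of the BF16 conversions above (illustrative helper names, not part of the vendored header; assumes the compiler targets AVX512BF16):

    #include <immintrin.h>

    /* pack 32 floats into 32 bf16 values: the low 256 bits of the result
       hold the conversion of lo, the high 256 bits the conversion of hi */
    static __m512bh pack_bf16(__m512 hi, __m512 lo)
    {
        return _mm512_cvtne2ps_pbh(hi, lo);
    }

    /* widen a single bf16 scalar back to float (no dedicated instruction) */
    static float bf16_to_float(__bf16 x)
    {
        return _mm_cvtsbh_ss(x);
    }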
+static __inline__ __m512bh __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtne2ps_pbh(__mmask32 __U, __m512 __A, __m512 __B) { + return (__m512bh)__builtin_ia32_selectpbf_512((__mmask32)__U, + (__v32bf)_mm512_cvtne2ps_pbh(__A, __B), + (__v32bf)_mm512_setzero_si512()); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 512-bit vector of [16 x float]. +/// \returns A 256-bit vector of [16 x bfloat] come from conversion of __A. +static __inline__ __m256bh __DEFAULT_FN_ATTRS512 +_mm512_cvtneps_pbh(__m512 __A) { + return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A, + (__v16bf)_mm256_undefined_si256(), + (__mmask16)-1); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 512-bit vector of [16 x float]. +/// \param __W +/// A 256-bit vector of [16 x bfloat]. +/// \param __U +/// A 16-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A. A 0 means element from __W. +/// \returns A 256-bit vector of [16 x bfloat] come from conversion of __A. +static __inline__ __m256bh __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtneps_pbh(__m256bh __W, __mmask16 __U, __m512 __A) { + return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A, + (__v16bf)__W, + (__mmask16)__U); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 512-bit vector of [16 x float]. +/// \param __U +/// A 16-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A. A 0 means element is zero. +/// \returns A 256-bit vector of [16 x bfloat] come from conversion of __A. +static __inline__ __m256bh __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtneps_pbh(__mmask16 __U, __m512 __A) { + return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A, + (__v16bf)_mm256_setzero_si256(), + (__mmask16)__U); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 512-bit vector of [32 x bfloat]. +/// \param __B +/// A 512-bit vector of [32 x bfloat]. +/// \param __D +/// A 512-bit vector of [16 x float]. +/// \returns A 512-bit vector of [16 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_dpbf16_ps(__m512 __D, __m512bh __A, __m512bh __B) { + return (__m512)__builtin_ia32_dpbf16ps_512((__v16sf) __D, + (__v32bf) __A, + (__v32bf) __B); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 512-bit vector of [32 x bfloat]. +/// \param __B +/// A 512-bit vector of [32 x bfloat]. +/// \param __D +/// A 512-bit vector of [16 x float]. +/// \param __U +/// A 16-bit mask value specifying what is chosen for each element. +/// A 1 means __A and __B's dot product accumulated with __D. A 0 means __D. 
+/// \returns A 512-bit vector of [16 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_dpbf16_ps(__m512 __D, __mmask16 __U, __m512bh __A, __m512bh __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_dpbf16_ps(__D, __A, __B), + (__v16sf)__D); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 512-bit vector of [32 x bfloat]. +/// \param __B +/// A 512-bit vector of [32 x bfloat]. +/// \param __D +/// A 512-bit vector of [16 x float]. +/// \param __U +/// A 16-bit mask value specifying what is chosen for each element. +/// A 1 means __A and __B's dot product accumulated with __D. A 0 means 0. +/// \returns A 512-bit vector of [16 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_dpbf16_ps(__mmask16 __U, __m512 __D, __m512bh __A, __m512bh __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_dpbf16_ps(__D, __A, __B), + (__v16sf)_mm512_setzero_si512()); +} + +/// Convert Packed BF16 Data to Packed float Data. +/// +/// \headerfile +/// +/// \param __A +/// A 256-bit vector of [16 x bfloat]. +/// \returns A 512-bit vector of [16 x float] come from conversion of __A +static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtpbh_ps(__m256bh __A) { + return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32( + (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16)); +} + +/// Convert Packed BF16 Data to Packed float Data using zeroing mask. +/// +/// \headerfile +/// +/// \param __U +/// A 16-bit mask. Elements are zeroed out when the corresponding mask +/// bit is not set. +/// \param __A +/// A 256-bit vector of [16 x bfloat]. +/// \returns A 512-bit vector of [16 x float] come from conversion of __A +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpbh_ps(__mmask16 __U, __m256bh __A) { + return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32( + (__m512i)_mm512_maskz_cvtepi16_epi32((__mmask16)__U, (__m256i)__A), 16)); +} + +/// Convert Packed BF16 Data to Packed float Data using merging mask. +/// +/// \headerfile +/// +/// \param __S +/// A 512-bit vector of [16 x float]. Elements are copied from __S when +/// the corresponding mask bit is not set. +/// \param __U +/// A 16-bit mask. +/// \param __A +/// A 256-bit vector of [16 x bfloat]. +/// \returns A 512-bit vector of [16 x float] come from conversion of __A +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpbh_ps(__m512 __S, __mmask16 __U, __m256bh __A) { + return _mm512_castsi512_ps((__m512i)_mm512_mask_slli_epi32( + (__m512i)__S, (__mmask16)__U, + (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16)); +} + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS512 + +#endif +#endif diff --git a/third_party/intel/clang/avx512bitalgintrin.h b/third_party/intel/clang/avx512bitalgintrin.h new file mode 100644 index 000000000..bad265ceb --- /dev/null +++ b/third_party/intel/clang/avx512bitalgintrin.h @@ -0,0 +1,86 @@ +/*===------------- avx512bitalgintrin.h - BITALG intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512BITALGINTRIN_H +#define __AVX512BITALGINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512bitalg,evex512"), \ + __min_vector_width__(512))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_popcnt_epi16(__m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcntw_512((__v32hi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_popcnt_epi16(__m512i __A, __mmask32 __U, __m512i __B) +{ + return (__m512i) __builtin_ia32_selectw_512((__mmask32) __U, + (__v32hi) _mm512_popcnt_epi16(__B), + (__v32hi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_popcnt_epi16(__mmask32 __U, __m512i __B) +{ + return _mm512_mask_popcnt_epi16((__m512i) _mm512_setzero_si512(), + __U, + __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_popcnt_epi8(__m512i __A) +{ + return (__m512i) __builtin_ia32_vpopcntb_512((__v64qi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_popcnt_epi8(__m512i __A, __mmask64 __U, __m512i __B) +{ + return (__m512i) __builtin_ia32_selectb_512((__mmask64) __U, + (__v64qi) _mm512_popcnt_epi8(__B), + (__v64qi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B) +{ + return _mm512_mask_popcnt_epi8((__m512i) _mm512_setzero_si512(), + __U, + __B); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_mask_bitshuffle_epi64_mask(__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__mmask64) __builtin_ia32_vpshufbitqmb512_mask((__v64qi) __A, + (__v64qi) __B, + __U); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_mm512_bitshuffle_epi64_mask(__m512i __A, __m512i __B) +{ + return _mm512_mask_bitshuffle_epi64_mask((__mmask64) -1, + __A, + __B); +} + + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/avx512bwintrin.h b/third_party/intel/clang/avx512bwintrin.h new file mode 100644 index 000000000..c854720de --- /dev/null +++ b/third_party/intel/clang/avx512bwintrin.h @@ -0,0 +1,2014 @@ +/*===------------- avx512bwintrin.h - AVX512BW intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512BWINTRIN_H +#define __AVX512BWINTRIN_H + +typedef unsigned int __mmask32; +typedef unsigned long long __mmask64; + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS512 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512bw,evex512"), __min_vector_width__(512))) +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512bw,no-evex512"))) + +static __inline __mmask32 __DEFAULT_FN_ATTRS +_knot_mask32(__mmask32 __M) +{ + return __builtin_ia32_knotsi(__M); +} + +static __inline __mmask64 __DEFAULT_FN_ATTRS _knot_mask64(__mmask64 __M) { + return __builtin_ia32_knotdi(__M); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_kand_mask32(__mmask32 __A, __mmask32 __B) +{ + return (__mmask32)__builtin_ia32_kandsi((__mmask32)__A, (__mmask32)__B); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kand_mask64(__mmask64 __A, + __mmask64 __B) { + return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_kandn_mask32(__mmask32 __A, __mmask32 __B) +{ + return (__mmask32)__builtin_ia32_kandnsi((__mmask32)__A, (__mmask32)__B); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kandn_mask64(__mmask64 __A, + __mmask64 __B) { + return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_kor_mask32(__mmask32 __A, __mmask32 __B) +{ + return (__mmask32)__builtin_ia32_korsi((__mmask32)__A, (__mmask32)__B); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kor_mask64(__mmask64 __A, + __mmask64 __B) { + return (__mmask64)__builtin_ia32_kordi((__mmask64)__A, (__mmask64)__B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_kxnor_mask32(__mmask32 __A, __mmask32 __B) +{ + return (__mmask32)__builtin_ia32_kxnorsi((__mmask32)__A, (__mmask32)__B); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kxnor_mask64(__mmask64 __A, + __mmask64 __B) { + return (__mmask64)__builtin_ia32_kxnordi((__mmask64)__A, (__mmask64)__B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_kxor_mask32(__mmask32 __A, __mmask32 __B) +{ + return (__mmask32)__builtin_ia32_kxorsi((__mmask32)__A, (__mmask32)__B); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kxor_mask64(__mmask64 __A, + __mmask64 __B) { + return (__mmask64)__builtin_ia32_kxordi((__mmask64)__A, (__mmask64)__B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestc_mask32_u8(__mmask32 __A, __mmask32 __B) +{ + return (unsigned char)__builtin_ia32_kortestcsi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestz_mask32_u8(__mmask32 __A, __mmask32 __B) +{ + return (unsigned char)__builtin_ia32_kortestzsi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_kortestcsi(__A, __B); + return (unsigned char)__builtin_ia32_kortestzsi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestc_mask64_u8(__mmask64 __A, __mmask64 __B) { + return (unsigned char)__builtin_ia32_kortestcdi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestz_mask64_u8(__mmask64 __A, __mmask64 __B) { + return (unsigned char)__builtin_ia32_kortestzdi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_kortestcdi(__A, __B); + return (unsigned char)__builtin_ia32_kortestzdi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestc_mask32_u8(__mmask32 __A, __mmask32 __B) +{ 
+ return (unsigned char)__builtin_ia32_ktestcsi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestz_mask32_u8(__mmask32 __A, __mmask32 __B) +{ + return (unsigned char)__builtin_ia32_ktestzsi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_ktestcsi(__A, __B); + return (unsigned char)__builtin_ia32_ktestzsi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestc_mask64_u8(__mmask64 __A, __mmask64 __B) { + return (unsigned char)__builtin_ia32_ktestcdi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktestz_mask64_u8(__mmask64 __A, __mmask64 __B) { + return (unsigned char)__builtin_ia32_ktestzdi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_ktest_mask64_u8(__mmask64 __A, __mmask64 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_ktestcdi(__A, __B); + return (unsigned char)__builtin_ia32_ktestzdi(__A, __B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_kadd_mask32(__mmask32 __A, __mmask32 __B) +{ + return (__mmask32)__builtin_ia32_kaddsi((__mmask32)__A, (__mmask32)__B); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kadd_mask64(__mmask64 __A, + __mmask64 __B) { + return (__mmask64)__builtin_ia32_kadddi((__mmask64)__A, (__mmask64)__B); +} + +#define _kshiftli_mask32(A, I) \ + ((__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I))) + +#define _kshiftri_mask32(A, I) \ + ((__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I))) + +#define _kshiftli_mask64(A, I) \ + ((__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I))) + +#define _kshiftri_mask64(A, I) \ + ((__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I))) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_cvtmask32_u32(__mmask32 __A) { + return (unsigned int)__builtin_ia32_kmovd((__mmask32)__A); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_cvtmask64_u64(__mmask64 __A) { + return (unsigned long long)__builtin_ia32_kmovq((__mmask64)__A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_cvtu32_mask32(unsigned int __A) { + return (__mmask32)__builtin_ia32_kmovd((__mmask32)__A); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS +_cvtu64_mask64(unsigned long long __A) { + return (__mmask64)__builtin_ia32_kmovq((__mmask64)__A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_load_mask32(__mmask32 *__A) { + return (__mmask32)__builtin_ia32_kmovd(*(__mmask32 *)__A); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS _load_mask64(__mmask64 *__A) { + return (__mmask64)__builtin_ia32_kmovq(*(__mmask64 *)__A); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_store_mask32(__mmask32 *__A, __mmask32 __B) { + *(__mmask32 *)__A = __builtin_ia32_kmovd((__mmask32)__B); +} + +static __inline__ void __DEFAULT_FN_ATTRS _store_mask64(__mmask64 *__A, + __mmask64 __B) { + *(__mmask64 *)__A = __builtin_ia32_kmovq((__mmask64)__B); +} + +/* Integer compare */ + +#define _mm512_cmp_epi8_mask(a, b, p) \ + ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)-1)) + +#define _mm512_mask_cmp_epi8_mask(m, a, b, p) \ + ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)(m))) + +#define _mm512_cmp_epu8_mask(a, b, p) \ + ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ + 
(__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)-1)) + +#define _mm512_mask_cmp_epu8_mask(m, a, b, p) \ + ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), (int)(p), \ + (__mmask64)(m))) + +#define _mm512_cmp_epi16_mask(a, b, p) \ + ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)-1)) + +#define _mm512_mask_cmp_epi16_mask(m, a, b, p) \ + ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)(m))) + +#define _mm512_cmp_epu16_mask(a, b, p) \ + ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)-1)) + +#define _mm512_mask_cmp_epu16_mask(m, a, b, p) \ + ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), (int)(p), \ + (__mmask32)(m))) + +#define _mm512_cmpeq_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi8_mask(A, B) \ + _mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi8_mask(k, A, B) \ + _mm512_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epu8_mask(A, B) \ + _mm512_cmp_epu8_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu8_mask(k, A, B) \ + _mm512_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GE) +#define 
_mm512_mask_cmpge_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi16_mask(A, B) \ + _mm512_cmp_epi16_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi16_mask(k, A, B) \ + _mm512_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epu16_mask(A, B) \ + _mm512_cmp_epu16_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu16_mask(k, A, B) \ + _mm512_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_add_epi8 (__m512i __A, __m512i __B) { + return (__m512i) ((__v64qu) __A + (__v64qu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_add_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_add_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_add_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sub_epi8 (__m512i __A, __m512i __B) { + return (__m512i) ((__v64qu) __A - (__v64qu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_sub_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_sub_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_add_epi16 (__m512i __A, __m512i __B) { + return (__m512i) ((__v32hu) __A + (__v32hu) __B); +} + 
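A minimal sketch of how the compare masks and masked element-wise operations above compose (illustrative helper, not part of the vendored header; assumes AVX-512BW is enabled): a __mmask64 produced by a byte compare selects, per lane, between the untouched source and the computed sum.

    #include <immintrin.h>

    /* lanes of a that are zero receive a + b; all other lanes keep a */
    static __m512i add_where_zero(__m512i a, __m512i b)
    {
        __mmask64 zeros = _mm512_cmpeq_epi8_mask(a, _mm512_setzero_si512());
        return _mm512_mask_add_epi8(a, zeros, a, b);
    }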
+static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_add_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_add_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_add_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sub_epi16 (__m512i __A, __m512i __B) { + return (__m512i) ((__v32hu) __A - (__v32hu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sub_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sub_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mullo_epi16 (__m512i __A, __m512i __B) { + return (__m512i) ((__v32hu) __A * (__v32hu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mullo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mullo_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mullo_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, + (__v64qi) __W, + (__v64qi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, + (__v32hi) __W, + (__v32hi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_abs_epi8 (__m512i __A) +{ + return (__m512i)__builtin_elementwise_abs((__v64qs)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_epi8 (__m512i __W, __mmask64 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_abs_epi8(__A), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_abs_epi8(__A), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_abs_epi16 (__m512i __A) +{ + return (__m512i)__builtin_elementwise_abs((__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_epi16 (__m512i __W, __mmask32 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_abs_epi16(__A), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_abs_epi16 (__mmask32 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_abs_epi16(__A), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i 
__DEFAULT_FN_ATTRS512 +_mm512_packs_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_packssdw512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_packs_epi32(__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_packs_epi32(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_packs_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_packs_epi32(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_packs_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_packsswb512((__v32hi)__A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_packs_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_packs_epi16(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_packs_epi16(__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_packs_epi16(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_packus_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_packusdw512((__v16si) __A, (__v16si) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_packus_epi32(__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_packus_epi32(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_packus_epi32(__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_packus_epi32(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_packus_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_packuswb512((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_packus_epi16(__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_packus_epi16(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_packus_epi16(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_adds_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_add_sat((__v64qs)__A, (__v64qs)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_adds_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_adds_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_adds_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_adds_epi16 (__m512i __A, __m512i __B) +{ + 
return (__m512i)__builtin_elementwise_add_sat((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_adds_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_adds_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_adds_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_adds_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_add_sat((__v64qu) __A, (__v64qu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_adds_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_adds_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_adds_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_adds_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_add_sat((__v32hu) __A, (__v32hu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_adds_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_adds_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_adds_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_adds_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_avg_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pavgb512((__v64qi)__A, (__v64qi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_avg_epu8 (__m512i __W, __mmask64 __U, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_avg_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_avg_epu8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_avg_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_avg_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pavgw512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_avg_epu16 (__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_avg_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_avg_epu16(__A, __B), + (__v32hi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_max((__v64qs) __A, (__v64qs) __B); +} + +static __inline__ __m512i 
__DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epi8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_max_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_max_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_max((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epi16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_max_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_max_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_max((__v64qu)__A, (__v64qu)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epu8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_max_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_max_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_max((__v32hu)__A, (__v32hu)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epu16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_max_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_max_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_min((__v64qs) __A, (__v64qs) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epi8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_min_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_min_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_min((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epi16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return 
(__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_min_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_min_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_min((__v64qu)__A, (__v64qu)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epu8 (__mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_min_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_min_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_min((__v32hu)__A, (__v32hu)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epu16 (__mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_min_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_min_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_shuffle_epi8(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pshufb512((__v64qi)__A,(__v64qi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_shuffle_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_shuffle_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_subs_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_sub_sat((__v64qs)__A, (__v64qs)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_subs_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_subs_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_subs_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_subs_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_sub_sat((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_subs_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_subs_epi16(__A, __B), + 
(__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_subs_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_subs_epu8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_sub_sat((__v64qu) __A, (__v64qu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_subs_epu8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_subs_epu8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_subs_epu8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_subs_epu16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_sub_sat((__v32hu) __A, (__v32hu) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_subs_epu16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_subs_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_subs_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_subs_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_epi16(__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I, + (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_epi16(__m512i __A, __mmask32 __U, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B), + (__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_epi16(__m512i __A, __m512i __I, __mmask32 __U, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B), + (__v32hi)__I); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_permutex2var_epi16(__A, __I, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mulhrs_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhrs_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhrs_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mulhi_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmulhw512((__v32hi) 
__A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mulhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhi_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mulhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhi_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mulhi_epu16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_pmulhuw512((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mulhi_epu16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhi_epu16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mulhi_epu16 (__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_mulhi_epu16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maddubs_epi16(__m512i __X, __m512i __Y) { + return (__m512i)__builtin_ia32_pmaddubsw512((__v64qi)__X, (__v64qi)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_maddubs_epi16(__m512i __W, __mmask32 __U, __m512i __X, + __m512i __Y) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U, + (__v32hi)_mm512_maddubs_epi16(__X, __Y), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_maddubs_epi16(__mmask32 __U, __m512i __X, __m512i __Y) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32) __U, + (__v32hi)_mm512_maddubs_epi16(__X, __Y), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_madd_epi16(__m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_pmaddwd512((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_madd_epi16(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_madd_epi16(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_madd_epi16(__mmask16 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_madd_epi16(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi16_epi8 (__m512i __A) { + return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi)_mm256_setzero_si256(), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi)__O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi16_epi8 (__mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovswb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi16_epi8 (__m512i __A) { + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 
+_mm512_mask_cvtusepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi16_epi8 (__mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovuswb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi16_epi8 (__m512i __A) { + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) _mm256_undefined_si256(), + (__mmask32) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi16_epi8 (__m256i __O, __mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi16_epi8 (__mmask32 __M, __m512i __A) { + return (__m256i) __builtin_ia32_pmovwb512_mask ((__v32hi) __A, + (__v32qi) _mm256_setzero_si256(), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovwb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask32 __M, __m512i __A) +{ + __builtin_ia32_pmovuswb512mem_mask ((__v32qi *) __P, (__v32hi) __A, __M); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_epi8(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B, + 8, 64+8, 9, 64+9, + 10, 64+10, 11, 64+11, + 12, 64+12, 13, 64+13, + 14, 64+14, 15, 64+15, + 24, 64+24, 25, 64+25, + 26, 64+26, 27, 64+27, + 28, 64+28, 29, 64+29, + 30, 64+30, 31, 64+31, + 40, 64+40, 41, 64+41, + 42, 64+42, 43, 64+43, + 44, 64+44, 45, 64+45, + 46, 64+46, 47, 64+47, + 56, 64+56, 57, 64+57, + 58, 64+58, 59, 64+59, + 60, 64+60, 61, 64+61, + 62, 64+62, 63, 64+63); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpackhi_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpackhi_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_epi16(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B, + 4, 32+4, 5, 32+5, + 6, 32+6, 7, 32+7, + 12, 32+12, 13, 32+13, + 14, 32+14, 15, 32+15, + 20, 32+20, 21, 32+21, + 22, 32+22, 23, 32+23, + 28, 32+28, 29, 32+29, + 30, 32+30, 31, 32+31); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpackhi_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + 
(__v32hi)_mm512_unpackhi_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_epi8(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v64qi)__A, (__v64qi)__B, + 0, 64+0, 1, 64+1, + 2, 64+2, 3, 64+3, + 4, 64+4, 5, 64+5, + 6, 64+6, 7, 64+7, + 16, 64+16, 17, 64+17, + 18, 64+18, 19, 64+19, + 20, 64+20, 21, 64+21, + 22, 64+22, 23, 64+23, + 32, 64+32, 33, 64+33, + 34, 64+34, 35, 64+35, + 36, 64+36, 37, 64+37, + 38, 64+38, 39, 64+39, + 48, 64+48, 49, 64+49, + 50, 64+50, 51, 64+51, + 52, 64+52, 53, 64+53, + 54, 64+54, 55, 64+55); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpacklo_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_epi8(__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U, + (__v64qi)_mm512_unpacklo_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_epi16(__m512i __A, __m512i __B) { + return (__m512i)__builtin_shufflevector((__v32hi)__A, (__v32hi)__B, + 0, 32+0, 1, 32+1, + 2, 32+2, 3, 32+3, + 8, 32+8, 9, 32+9, + 10, 32+10, 11, 32+11, + 16, 32+16, 17, 32+17, + 18, 32+18, 19, 32+19, + 24, 32+24, 25, 32+25, + 26, 32+26, 27, 32+27); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpacklo_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_unpacklo_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi8_epi16(__m256i __A) +{ + /* This function always performs a signed extension, but __v32qi is a char + which may be signed or unsigned, so use __v32qs. 
*/ + return (__m512i)__builtin_convertvector((__v32qs)__A, __v32hi); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepi8_epi16(__A), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepi8_epi16(__A), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu8_epi16(__m256i __A) +{ + return (__m512i)__builtin_convertvector((__v32qu)__A, __v32hi); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepu8_epi16(__A), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_cvtepu8_epi16(__A), + (__v32hi)_mm512_setzero_si512()); +} + + +#define _mm512_shufflehi_epi16(A, imm) \ + ((__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm))) + +#define _mm512_mask_shufflehi_epi16(W, U, A, imm) \ + ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflehi_epi16((A), \ + (imm)), \ + (__v32hi)(__m512i)(W))) + +#define _mm512_maskz_shufflehi_epi16(U, A, imm) \ + ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflehi_epi16((A), \ + (imm)), \ + (__v32hi)_mm512_setzero_si512())) + +#define _mm512_shufflelo_epi16(A, imm) \ + ((__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm))) + + +#define _mm512_mask_shufflelo_epi16(W, U, A, imm) \ + ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflelo_epi16((A), \ + (imm)), \ + (__v32hi)(__m512i)(W))) + + +#define _mm512_maskz_shufflelo_epi16(U, A, imm) \ + ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shufflelo_epi16((A), \ + (imm)), \ + (__v32hi)_mm512_setzero_si512())) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sllv_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psllv32hi((__v32hi) __A, (__v32hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sllv_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sllv_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sll_epi16(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psllw512((__v32hi) __A, (__v8hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sll_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + 
(__v32hi)_mm512_sll_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_slli_epi16(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, (int)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_slli_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_slli_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +#define _mm512_bslli_epi128(a, imm) \ + ((__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srlv_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psrlv32hi((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srlv_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srlv_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srav_epi16(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_psrav32hi((__v32hi)__A, (__v32hi)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srav_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srav_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sra_epi16(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psraw512((__v32hi) __A, (__v8hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sra_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_sra_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srai_epi16(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, (int)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srai_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, unsigned int __B) +{ + return 
(__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srai_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srl_epi16(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrlw512((__v32hi) __A, (__v8hi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srl_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srl_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srli_epi16(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, (int)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srli_epi16(__A, __B), + (__v32hi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U, + (__v32hi)_mm512_srli_epi16(__A, (unsigned int)__B), + (__v32hi)_mm512_setzero_si512()); +} + +#define _mm512_bsrli_epi128(a, imm) \ + ((__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, + (__v32hi) __A, + (__v32hi) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U, + (__v32hi) __A, + (__v32hi) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, + (__v64qi) __A, + (__v64qi) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U, + (__v64qi) __A, + (__v64qi) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A) +{ + return (__m512i) __builtin_ia32_selectb_512(__M, + (__v64qi)_mm512_set1_epi8(__A), + (__v64qi) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_set1_epi8 (__mmask64 __M, char __A) +{ + return (__m512i) __builtin_ia32_selectb_512(__M, + (__v64qi) _mm512_set1_epi8(__A), + (__v64qi) _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS _mm512_kunpackd(__mmask64 __A, + __mmask64 __B) { + return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A, + (__mmask64) __B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS +_mm512_kunpackw (__mmask32 __A, __mmask32 __B) +{ + return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A, + (__mmask32) __B); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_epi16 (void const *__P) +{ + struct __loadu_epi16 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return 
((const struct __loadu_epi16*)__P)->__v; +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_epi16 (__m512i __W, __mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_epi16 (__mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquhi512_mask ((const __v32hi *) __P, + (__v32hi) + _mm512_setzero_si512 (), + (__mmask32) __U); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_epi8 (void const *__P) +{ + struct __loadu_epi8 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi8*)__P)->__v; +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_epi8 (__m512i __W, __mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P, + (__v64qi) __W, + (__mmask64) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_epi8 (__mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddquqi512_mask ((const __v64qi *) __P, + (__v64qi) + _mm512_setzero_si512 (), + (__mmask64) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_epi16 (void *__P, __m512i __A) +{ + struct __storeu_epi16 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi16*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_epi16 (void *__P, __mmask32 __U, __m512i __A) +{ + __builtin_ia32_storedquhi512_mask ((__v32hi *) __P, + (__v32hi) __A, + (__mmask32) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_epi8 (void *__P, __m512i __A) +{ + struct __storeu_epi8 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi8*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_epi8 (void *__P, __mmask64 __U, __m512i __A) +{ + __builtin_ia32_storedquqi512_mask ((__v64qi *) __P, + (__v64qi) __A, + (__mmask64) __U); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_test_epi8_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpneq_epi8_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_mask_test_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpneq_epi8_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_test_epi16_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpneq_epi16_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_mask_test_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpneq_epi16_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_testn_epi8_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpeq_epi8_mask (_mm512_and_epi32 (__A, __B), _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_mask_testn_epi8_mask (__mmask64 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpeq_epi8_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_testn_epi16_mask (__m512i __A, __m512i __B) +{ + return 
_mm512_cmpeq_epi16_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_mask_testn_epi16_mask (__mmask32 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpeq_epi16_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask64 __DEFAULT_FN_ATTRS512 +_mm512_movepi8_mask (__m512i __A) +{ + return (__mmask64) __builtin_ia32_cvtb2mask512 ((__v64qi) __A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS512 +_mm512_movepi16_mask (__m512i __A) +{ + return (__mmask32) __builtin_ia32_cvtw2mask512 ((__v32hi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_movm_epi8 (__mmask64 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2b512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_movm_epi16 (__mmask32 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2w512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcastb_epi8 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v16qi) __A, (__v16qi) __A, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastb_epi8 (__m512i __O, __mmask64 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectb_512(__M, + (__v64qi) _mm512_broadcastb_epi8(__A), + (__v64qi) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastb_epi8 (__mmask64 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectb_512(__M, + (__v64qi) _mm512_broadcastb_epi8(__A), + (__v64qi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_set1_epi16 (__m512i __O, __mmask32 __M, short __A) +{ + return (__m512i) __builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_set1_epi16(__A), + (__v32hi) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_set1_epi16 (__mmask32 __M, short __A) +{ + return (__m512i) __builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_set1_epi16(__A), + (__v32hi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcastw_epi16 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v8hi) __A, (__v8hi) __A, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastw_epi16 (__m512i __O, __mmask32 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_broadcastw_epi16(__A), + (__v32hi) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastw_epi16 (__mmask32 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectw_512(__M, + (__v32hi) _mm512_broadcastw_epi16(__A), + (__v32hi) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_epi16 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_epi16 (__mmask32 __M, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M, + (__v32hi)_mm512_permutexvar_epi16(__A, __B), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 
__M, __m512i __A,
+                                __m512i __B)
+{
+  return (__m512i)__builtin_ia32_selectw_512((__mmask32)__M,
+                              (__v32hi)_mm512_permutexvar_epi16(__A, __B),
+                              (__v32hi)__W);
+}
+
+#define _mm512_alignr_epi8(A, B, N) \
+  ((__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \
+                                      (__v64qi)(__m512i)(B), (int)(N)))
+
+#define _mm512_mask_alignr_epi8(W, U, A, B, N) \
+  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+                                 (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
+                                 (__v64qi)(__m512i)(W)))
+
+#define _mm512_maskz_alignr_epi8(U, A, B, N) \
+  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+                                 (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
+                                 (__v64qi)(__m512i)_mm512_setzero_si512()))
+
+#define _mm512_dbsad_epu8(A, B, imm) \
+  ((__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \
+                                       (__v64qi)(__m512i)(B), (int)(imm)))
+
+#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) \
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                       (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
+                                       (__v32hi)(__m512i)(W)))
+
+#define _mm512_maskz_dbsad_epu8(U, A, B, imm) \
+  ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+                                       (__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
+                                       (__v32hi)_mm512_setzero_si512()))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_sad_epu8 (__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_psadbw512 ((__v64qi) __A,
+                                            (__v64qi) __B);
+}
+
+#undef __DEFAULT_FN_ATTRS512
+#undef __DEFAULT_FN_ATTRS
+
+#endif
diff --git a/third_party/intel/clang/avx512cdintrin.h b/third_party/intel/clang/avx512cdintrin.h
new file mode 100644
index 000000000..33b552f6f
--- /dev/null
+++ b/third_party/intel/clang/avx512cdintrin.h
@@ -0,0 +1,125 @@
+/*===------------- avx512cdintrin.h - AVX512CD intrinsics ------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512cdintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512CDINTRIN_H
+#define __AVX512CDINTRIN_H
+
+/* Define the default attributes for the functions in this file.
*/ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512cd,evex512"), __min_vector_width__(512))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_conflict_epi64 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictdi_512 ((__v8di) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_conflict_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_conflict_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_conflict_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_conflict_epi64(__A), + (__v8di)_mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_conflict_epi32 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vpconflictsi_512 ((__v16si) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_conflict_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_conflict_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_conflict_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_conflict_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_lzcnt_epi32 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntd_512 ((__v16si) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_lzcnt_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_lzcnt_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_lzcnt_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_lzcnt_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_lzcnt_epi64 (__m512i __A) +{ + return (__m512i) __builtin_ia32_vplzcntq_512 ((__v8di) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_lzcnt_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_lzcnt_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_lzcnt_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_lzcnt_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastmb_epi64 (__mmask8 __A) +{ + return (__m512i) _mm512_set1_epi64((long long) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m512i) _mm512_set1_epi32((int) __A); + +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/avx512dqintrin.h b/third_party/intel/clang/avx512dqintrin.h new file mode 100644 index 000000000..88b48e3a3 --- /dev/null +++ b/third_party/intel/clang/avx512dqintrin.h @@ -0,0 +1,1379 @@ +/*===---- avx512dqintrin.h - AVX512DQ intrinsics ---------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVX512DQINTRIN_H
+#define __AVX512DQINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512dq,evex512"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, \
+                 __target__("avx512dq,no-evex512")))
+
+static __inline __mmask8 __DEFAULT_FN_ATTRS
+_knot_mask8(__mmask8 __M)
+{
+  return __builtin_ia32_knotqi(__M);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kand_mask8(__mmask8 __A, __mmask8 __B)
+{
+  return (__mmask8)__builtin_ia32_kandqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kandn_mask8(__mmask8 __A, __mmask8 __B)
+{
+  return (__mmask8)__builtin_ia32_kandnqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kor_mask8(__mmask8 __A, __mmask8 __B)
+{
+  return (__mmask8)__builtin_ia32_korqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kxnor_mask8(__mmask8 __A, __mmask8 __B)
+{
+  return (__mmask8)__builtin_ia32_kxnorqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kxor_mask8(__mmask8 __A, __mmask8 __B)
+{
+  return (__mmask8)__builtin_ia32_kxorqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestc_mask8_u8(__mmask8 __A, __mmask8 __B)
+{
+  return (unsigned char)__builtin_ia32_kortestcqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortestz_mask8_u8(__mmask8 __A, __mmask8 __B)
+{
+  return (unsigned char)__builtin_ia32_kortestzqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_kortest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__C) {
+  *__C = (unsigned char)__builtin_ia32_kortestcqi(__A, __B);
+  return (unsigned char)__builtin_ia32_kortestzqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestc_mask8_u8(__mmask8 __A, __mmask8 __B)
+{
+  return (unsigned char)__builtin_ia32_ktestcqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestz_mask8_u8(__mmask8 __A, __mmask8 __B)
+{
+  return (unsigned char)__builtin_ia32_ktestzqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__C) {
+  *__C = (unsigned char)__builtin_ia32_ktestcqi(__A, __B);
+  return (unsigned char)__builtin_ia32_ktestzqi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestc_mask16_u8(__mmask16 __A, __mmask16 __B)
+{
+  return (unsigned char)__builtin_ia32_ktestchi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktestz_mask16_u8(__mmask16 __A, __mmask16 __B)
+{
+  return (unsigned char)__builtin_ia32_ktestzhi(__A, __B);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_ktest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) {
+  *__C = (unsigned char)__builtin_ia32_ktestchi(__A, __B);
+  return (unsigned char)__builtin_ia32_ktestzhi(__A, __B);
+}
+
+static __inline__ __mmask8 __DEFAULT_FN_ATTRS
+_kadd_mask8(__mmask8 __A, __mmask8 __B)
+{
+  return (__mmask8)__builtin_ia32_kaddqi((__mmask8)__A, (__mmask8)__B);
+}
+
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_kadd_mask16(__mmask16 __A, __mmask16 __B) +{ + return (__mmask16)__builtin_ia32_kaddhi((__mmask16)__A, (__mmask16)__B); +} + +#define _kshiftli_mask8(A, I) \ + ((__mmask8)__builtin_ia32_kshiftliqi((__mmask8)(A), (unsigned int)(I))) + +#define _kshiftri_mask8(A, I) \ + ((__mmask8)__builtin_ia32_kshiftriqi((__mmask8)(A), (unsigned int)(I))) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_cvtmask8_u32(__mmask8 __A) { + return (unsigned int)__builtin_ia32_kmovb((__mmask8)__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_cvtu32_mask8(unsigned int __A) { + return (__mmask8)__builtin_ia32_kmovb((__mmask8)__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS +_load_mask8(__mmask8 *__A) { + return (__mmask8)__builtin_ia32_kmovb(*(__mmask8 *)__A); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_store_mask8(__mmask8 *__A, __mmask8 __B) { + *(__mmask8 *)__A = __builtin_ia32_kmovb((__mmask8)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mullo_epi64 (__m512i __A, __m512i __B) { + return (__m512i) ((__v8du) __A * (__v8du) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mullo_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_mullo_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_mullo_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_xor_pd(__m512d __A, __m512d __B) { + return (__m512d)((__v8du)__A ^ (__v8du)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_xor_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_xor_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_xor_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_xor_ps (__m512 __A, __m512 __B) { + return (__m512)((__v16su)__A ^ (__v16su)__B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_xor_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_xor_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_xor_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_or_pd(__m512d __A, __m512d __B) { + return (__m512d)((__v8du)__A | (__v8du)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_or_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_or_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_or_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_or_ps(__m512 __A, __m512 
__B) { + return (__m512)((__v16su)__A | (__v16su)__B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_or_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_or_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_or_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_and_pd(__m512d __A, __m512d __B) { + return (__m512d)((__v8du)__A & (__v8du)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_and_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_and_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_and_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_and_ps(__m512 __A, __m512 __B) { + return (__m512)((__v16su)__A & (__v16su)__B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_and_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_and_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_and_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_andnot_pd(__m512d __A, __m512d __B) { + return (__m512d)(~(__v8du)__A & (__v8du)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_andnot_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_andnot_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_andnot_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_andnot_ps(__m512 __A, __m512 __B) { + return (__m512)(~(__v16su)__A & (__v16su)__B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_andnot_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_andnot_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_andnot_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_andnot_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_epi64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + 
_MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epi64(A, R) \ + ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundpd_epi64(U, A, R) \ + ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_epu64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epu64(A, R) \ + ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundpd_epu64(U, A, R) \ + ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtps_epi64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epi64(A, R) \ + ((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundps_epi64(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundps_epi64(U, A, R) \ + ((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtps_epu64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) 
_mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epu64(A, R) \ + ((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundps_epu64(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundps_epu64(U, A, R) \ + ((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R))) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_pd (__m512i __A) { + return (__m512d)__builtin_convertvector((__v8di)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtepi64_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtepi64_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_cvt_roundepi64_pd(A, R) \ + ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) \ + ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundepi64_pd(U, A, R) \ + ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_ps (__m512i __A) { + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepi64_ps(A, R) \ + ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) \ + ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)(__m256)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm512_maskz_cvt_roundepi64_ps(U, A, R) \ + ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + + +static 
__inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttpd_epi64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundpd_epi64(A, R) \ + ((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) \ + ((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttpd_epu64 (__m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) { + return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundpd_epu64(A, R) \ + ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) \ + ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttps_epi64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundps_epi64(A, R) \ + ((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R))) + +#define 
_mm512_mask_cvtt_roundps_epi64(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundps_epi64(U, A, R) \ + ((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttps_epu64 (__m256 __A) { + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) { + return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundps_epu64(A, R) \ + ((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)(__m512i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundps_epu64(U, A, R) \ + ((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \ + (__v8di)_mm512_setzero_si512(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepu64_pd (__m512i __A) { + return (__m512d)__builtin_convertvector((__v8du)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtepu64_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtepu64_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_cvt_roundepu64_pd(A, R) \ + ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) \ + ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_maskz_cvt_roundepu64_pd(U, A, R) \ + ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_cvtepu64_ps (__m512i __A) { + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) { + return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A, + (__v8sf) _mm256_setzero_ps(), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + 
+#define _mm512_cvt_roundepu64_ps(A, R) \ + ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) \ + ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)(__m256)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm512_maskz_cvt_roundepu64_ps(U, A, R) \ + ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_range_pd(A, B, C) \ + ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_range_pd(W, U, A, B, C) \ + ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_range_pd(U, A, B, C) \ + ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_range_round_pd(A, B, C, R) \ + ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_range_round_pd(W, U, A, B, C, R) \ + ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)(__m512d)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm512_maskz_range_round_pd(U, A, B, C, R) \ + ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(C), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_range_ps(A, B, C) \ + ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_range_ps(W, U, A, B, C) \ + ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_range_ps(U, A, B, C) \ + ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_range_round_ps(A, B, C, R) \ + ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_range_round_ps(W, U, A, B, C, R) \ + ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)(__m512)(W), (__mmask16)(U), \ + (int)(R))) + +#define _mm512_maskz_range_round_ps(U, A, B, C, R) \ + ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(C), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R))) + +#define _mm_range_round_ss(A, B, C, R) \ + ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8) -1, (int)(C),\ + (int)(R))) + +#define _mm_range_ss(A ,B , C) _mm_range_round_ss(A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_range_round_ss(W, U, A, B, C, R) \ + 
((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W),\ + (__mmask8)(U), (int)(C),\ + (int)(R))) + +#define _mm_mask_range_ss(W , U, A, B, C) _mm_mask_range_round_ss(W, U, A, B, C , _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_range_round_ss(U, A, B, C, R) \ + ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(C),\ + (int)(R))) + +#define _mm_maskz_range_ss(U, A ,B , C) _mm_maskz_range_round_ss(U, A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_range_round_sd(A, B, C, R) \ + ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8) -1, (int)(C),\ + (int)(R))) + +#define _mm_range_sd(A ,B , C) _mm_range_round_sd(A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_range_round_sd(W, U, A, B, C, R) \ + ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W),\ + (__mmask8)(U), (int)(C),\ + (int)(R))) + +#define _mm_mask_range_sd(W, U, A, B, C) _mm_mask_range_round_sd(W, U, A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_range_round_sd(U, A, B, C, R) \ + ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(C),\ + (int)(R))) + +#define _mm_maskz_range_sd(U, A, B, C) _mm_maskz_range_round_sd(U, A, B, C ,_MM_FROUND_CUR_DIRECTION) + +#define _mm512_reduce_pd(A, B) \ + ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_reduce_pd(W, U, A, B) \ + ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_reduce_pd(U, A, B) \ + ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_reduce_ps(A, B) \ + ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_reduce_ps(W, U, A, B) \ + ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_reduce_ps(U, A, B) \ + ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_reduce_round_pd(A, B, R) \ + ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_reduce_round_pd(W, U, A, B, R) \ + ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_reduce_round_pd(U, A, B, R) \ + ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_reduce_round_ps(A, B, R) \ + ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_reduce_round_ps(W, U, A, B, R) \ 
+ ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_reduce_round_ps(U, A, B, R) \ + ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R))) + +#define _mm_reduce_ss(A, B, C) \ + ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \ + (int)(C), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_reduce_ss(W, U, A, B, C) \ + ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(C), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_reduce_ss(U, A, B, C) \ + ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(C), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_reduce_round_ss(A, B, C, R) \ + ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \ + (int)(C), (int)(R))) + +#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \ + ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(C), (int)(R))) + +#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \ + ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(C), (int)(R))) + +#define _mm_reduce_sd(A, B, C) \ + ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(C), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_reduce_sd(W, U, A, B, C) \ + ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), (__mmask8)(U), \ + (int)(C), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_reduce_sd(U, A, B, C) \ + ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(C), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_reduce_round_sd(A, B, C, R) \ + ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(C), (int)(R))) + +#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \ + ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), (__mmask8)(U), \ + (int)(C), (int)(R))) + +#define _mm_maskz_reduce_round_sd(U, A, B, C, R) \ + ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(C), (int)(R))) + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_movepi32_mask (__m512i __A) +{ + return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_movm_epi32 (__mmask16 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2d512 (__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_movm_epi64 (__mmask8 __A) +{ + return (__m512i) __builtin_ia32_cvtmask2q512 (__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_movepi64_mask (__m512i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A); +} + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f32x2 
(__m128 __A) +{ + return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A, + 0, 1, 0, 1, 0, 1, 0, 1, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x2(__A), + (__v16sf)__O); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x2(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f32x8(__m256 __A) +{ + return (__m512)__builtin_shufflevector((__v8sf)__A, (__v8sf)__A, + 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, __m256 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x8(__A), + (__v16sf)__O); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f32x8(__mmask16 __M, __m256 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x8(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f64x2(__m128d __A) +{ + return (__m512d)__builtin_shufflevector((__v2df)__A, (__v2df)__A, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, __m128d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M, + (__v8df)_mm512_broadcast_f64x2(__A), + (__v8df)__O); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M, + (__v8df)_mm512_broadcast_f64x2(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i32x2 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A, + 0, 1, 0, 1, 0, 1, 0, 1, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x2(__A), + (__v16si)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x2(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i32x8(__m256i __A) +{ + return (__m512i)__builtin_shufflevector((__v8si)__A, (__v8si)__A, + 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x8(__A), + (__v16si)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x8(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i64x2(__m128i __A) +{ + return 
(__m512i)__builtin_shufflevector((__v2di)__A, (__v2di)__A, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_broadcast_i64x2(__A), + (__v8di)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_broadcast_i64x2(__A), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_extractf32x8_ps(A, imm) \ + ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v8sf)_mm256_undefined_ps(), \ + (__mmask8)-1)) + +#define _mm512_mask_extractf32x8_ps(W, U, A, imm) \ + ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_extractf32x8_ps(U, A, imm) \ + ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U))) + +#define _mm512_extractf64x2_pd(A, imm) \ + ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \ + (int)(imm), \ + (__v2df)_mm_undefined_pd(), \ + (__mmask8)-1)) + +#define _mm512_mask_extractf64x2_pd(W, U, A, imm) \ + ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \ + (int)(imm), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_extractf64x2_pd(U, A, imm) \ + ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm512_extracti32x8_epi32(A, imm) \ + ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v8si)_mm256_undefined_si256(), \ + (__mmask8)-1)) + +#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) \ + ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_extracti32x8_epi32(U, A, imm) \ + ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U))) + +#define _mm512_extracti64x2_epi64(A, imm) \ + ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \ + (int)(imm), \ + (__v2di)_mm_undefined_si128(), \ + (__mmask8)-1)) + +#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) \ + ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \ + (int)(imm), \ + (__v2di)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_extracti64x2_epi64(U, A, imm) \ + ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \ + (int)(imm), \ + (__v2di)_mm_setzero_si128(), \ + (__mmask8)(U))) + +#define _mm512_insertf32x8(A, B, imm) \ + ((__m512)__builtin_ia32_insertf32x8((__v16sf)(__m512)(A), \ + (__v8sf)(__m256)(B), (int)(imm))) + +#define _mm512_mask_insertf32x8(W, U, A, B, imm) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \ + (__v16sf)(__m512)(W))) + +#define _mm512_maskz_insertf32x8(U, A, B, imm) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \ + (__v16sf)_mm512_setzero_ps())) + +#define _mm512_insertf64x2(A, B, imm) \ + ((__m512d)__builtin_ia32_insertf64x2_512((__v8df)(__m512d)(A), \ + (__v2df)(__m128d)(B), (int)(imm))) + +#define _mm512_mask_insertf64x2(W, U, A, B, imm) \ + 
((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x2((A), (B), (imm)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_insertf64x2(U, A, B, imm) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x2((A), (B), (imm)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_inserti32x8(A, B, imm) \ + ((__m512i)__builtin_ia32_inserti32x8((__v16si)(__m512i)(A), \ + (__v8si)(__m256i)(B), (int)(imm))) + +#define _mm512_mask_inserti32x8(W, U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x8((A), (B), (imm)), \ + (__v16si)(__m512i)(W))) + +#define _mm512_maskz_inserti32x8(U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x8((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512())) + +#define _mm512_inserti64x2(A, B, imm) \ + ((__m512i)__builtin_ia32_inserti64x2_512((__v8di)(__m512i)(A), \ + (__v2di)(__m128i)(B), (int)(imm))) + +#define _mm512_mask_inserti64x2(W, U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x2((A), (B), (imm)), \ + (__v8di)(__m512i)(W))) + +#define _mm512_maskz_inserti64x2(U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x2((A), (B), (imm)), \ + (__v8di)_mm512_setzero_si512())) + +#define _mm512_mask_fpclass_ps_mask(U, A, imm) \ + ((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \ + (int)(imm), (__mmask16)(U))) + +#define _mm512_fpclass_ps_mask(A, imm) \ + ((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \ + (int)(imm), (__mmask16)-1)) + +#define _mm512_mask_fpclass_pd_mask(U, A, imm) \ + ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__mmask8)(U))) + +#define _mm512_fpclass_pd_mask(A, imm) \ + ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__mmask8)-1)) + +#define _mm_fpclass_sd_mask(A, imm) \ + ((__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)-1)) + +#define _mm_mask_fpclass_sd_mask(U, A, imm) \ + ((__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)(U))) + +#define _mm_fpclass_ss_mask(A, imm) \ + ((__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)-1)) + +#define _mm_mask_fpclass_ss_mask(U, A, imm) \ + ((__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)(U))) + +#undef __DEFAULT_FN_ATTRS512 +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/avx512erintrin.h b/third_party/intel/clang/avx512erintrin.h new file mode 100644 index 000000000..1c5a2d2d2 --- /dev/null +++ b/third_party/intel/clang/avx512erintrin.h @@ -0,0 +1,271 @@ +/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." 
+#endif + +#ifndef __AVX512ERINTRIN_H +#define __AVX512ERINTRIN_H + +/* exp2a23 */ +#define _mm512_exp2a23_round_pd(A, R) \ + ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_exp2a23_round_pd(S, M, A, R) \ + ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(S), (__mmask8)(M), \ + (int)(R))) + +#define _mm512_maskz_exp2a23_round_pd(M, A, R) \ + ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(M), (int)(R))) + +#define _mm512_exp2a23_pd(A) \ + _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_exp2a23_pd(S, M, A) \ + _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_exp2a23_pd(M, A) \ + _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_exp2a23_round_ps(A, R) \ + ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_exp2a23_round_ps(S, M, A, R) \ + ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(S), (__mmask16)(M), \ + (int)(R))) + +#define _mm512_maskz_exp2a23_round_ps(M, A, R) \ + ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(M), (int)(R))) + +#define _mm512_exp2a23_ps(A) \ + _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_exp2a23_ps(S, M, A) \ + _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_exp2a23_ps(M, A) \ + _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) + +/* rsqrt28 */ +#define _mm512_rsqrt28_round_pd(A, R) \ + ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) \ + ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(S), (__mmask8)(M), \ + (int)(R))) + +#define _mm512_maskz_rsqrt28_round_pd(M, A, R) \ + ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(M), (int)(R))) + +#define _mm512_rsqrt28_pd(A) \ + _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rsqrt28_pd(S, M, A) \ + _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rsqrt28_pd(M, A) \ + _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_rsqrt28_round_ps(A, R) \ + ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) \ + ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(S), (__mmask16)(M), \ + (int)(R))) + +#define _mm512_maskz_rsqrt28_round_ps(M, A, R) \ + ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(M), (int)(R))) + +#define _mm512_rsqrt28_ps(A) \ + _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rsqrt28_ps(S, M, A) \ + _mm512_mask_rsqrt28_round_ps((S), (M), A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rsqrt28_ps(M, A) \ + _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm_rsqrt28_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), 
\ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) \ + ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(S), \ + (__mmask8)(M), (int)(R))) + +#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) \ + ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(M), (int)(R))) + +#define _mm_rsqrt28_ss(A, B) \ + _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rsqrt28_ss(S, M, A, B) \ + _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rsqrt28_ss(M, A, B) \ + _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_rsqrt28_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) \ + ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(S), \ + (__mmask8)(M), (int)(R))) + +#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) \ + ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(M), (int)(R))) + +#define _mm_rsqrt28_sd(A, B) \ + _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rsqrt28_sd(S, M, A, B) \ + _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rsqrt28_sd(M, A, B) \ + _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +/* rcp28 */ +#define _mm512_rcp28_round_pd(A, R) \ + ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_rcp28_round_pd(S, M, A, R) \ + ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(S), (__mmask8)(M), \ + (int)(R))) + +#define _mm512_maskz_rcp28_round_pd(M, A, R) \ + ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(M), (int)(R))) + +#define _mm512_rcp28_pd(A) \ + _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rcp28_pd(S, M, A) \ + _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rcp28_pd(M, A) \ + _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_rcp28_round_ps(A, R) \ + ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_rcp28_round_ps(S, M, A, R) \ + ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(S), (__mmask16)(M), \ + (int)(R))) + +#define _mm512_maskz_rcp28_round_ps(M, A, R) \ + ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(M), (int)(R))) + +#define _mm512_rcp28_ps(A) \ + _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rcp28_ps(S, M, A) \ + _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rcp28_ps(M, A) \ + _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm_rcp28_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + 
(__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_rcp28_round_ss(S, M, A, B, R) \ + ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(S), \ + (__mmask8)(M), (int)(R))) + +#define _mm_maskz_rcp28_round_ss(M, A, B, R) \ + ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(M), (int)(R))) + +#define _mm_rcp28_ss(A, B) \ + _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rcp28_ss(S, M, A, B) \ + _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rcp28_ss(M, A, B) \ + _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_rcp28_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_rcp28_round_sd(S, M, A, B, R) \ + ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(S), \ + (__mmask8)(M), (int)(R))) + +#define _mm_maskz_rcp28_round_sd(M, A, B, R) \ + ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(M), (int)(R))) + +#define _mm_rcp28_sd(A, B) \ + _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rcp28_sd(S, M, A, B) \ + _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rcp28_sd(M, A, B) \ + _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#endif /* __AVX512ERINTRIN_H */ diff --git a/third_party/intel/clang/avx512fintrin.h b/third_party/intel/clang/avx512fintrin.h new file mode 100644 index 000000000..4f172c74b --- /dev/null +++ b/third_party/intel/clang/avx512fintrin.h @@ -0,0 +1,9779 @@ +/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512FINTRIN_H +#define __AVX512FINTRIN_H + +typedef char __v64qi __attribute__((__vector_size__(64))); +typedef short __v32hi __attribute__((__vector_size__(64))); +typedef double __v8df __attribute__((__vector_size__(64))); +typedef float __v16sf __attribute__((__vector_size__(64))); +typedef long long __v8di __attribute__((__vector_size__(64))); +typedef int __v16si __attribute__((__vector_size__(64))); + +/* Unsigned types */ +typedef unsigned char __v64qu __attribute__((__vector_size__(64))); +typedef unsigned short __v32hu __attribute__((__vector_size__(64))); +typedef unsigned long long __v8du __attribute__((__vector_size__(64))); +typedef unsigned int __v16su __attribute__((__vector_size__(64))); + +/* We need an explicitly signed variant for char. Note that this shouldn't + * appear in the interface though. 
*/ +typedef signed char __v64qs __attribute__((__vector_size__(64))); + +typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64))); +typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64))); +typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64))); + +typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1))); +typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1))); +typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1))); + +typedef unsigned char __mmask8; +typedef unsigned short __mmask16; + +/* Rounding mode macros. */ +#define _MM_FROUND_TO_NEAREST_INT 0x00 +#define _MM_FROUND_TO_NEG_INF 0x01 +#define _MM_FROUND_TO_POS_INF 0x02 +#define _MM_FROUND_TO_ZERO 0x03 +#define _MM_FROUND_CUR_DIRECTION 0x04 + +/* Constants for integer comparison predicates */ +typedef enum { + _MM_CMPINT_EQ, /* Equal */ + _MM_CMPINT_LT, /* Less than */ + _MM_CMPINT_LE, /* Less than or Equal */ + _MM_CMPINT_UNUSED, + _MM_CMPINT_NE, /* Not Equal */ + _MM_CMPINT_NLT, /* Not Less than */ +#define _MM_CMPINT_GE _MM_CMPINT_NLT /* Greater than or Equal */ + _MM_CMPINT_NLE /* Not Less than or Equal */ +#define _MM_CMPINT_GT _MM_CMPINT_NLE /* Greater than */ +} _MM_CMPINT_ENUM; + +typedef enum +{ + _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02, + _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05, + _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08, + _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B, + _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E, + _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11, + _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14, + _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17, + _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A, + _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D, + _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20, + _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23, + _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26, + _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29, + _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C, + _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F, + _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32, + _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35, + _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38, + _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B, + _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E, + _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41, + _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44, + _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47, + _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A, + _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D, + _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50, + _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53, + _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56, + _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59, + _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C, + _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F, + _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62, 
+ _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65, + _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68, + _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B, + _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E, + _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71, + _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74, + _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77, + _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A, + _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D, + _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80, + _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83, + _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86, + _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89, + _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C, + _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F, + _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92, + _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95, + _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98, + _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B, + _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E, + _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1, + _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4, + _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7, + _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA, + _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD, + _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0, + _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3, + _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6, + _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9, + _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC, + _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF, + _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2, + _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5, + _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8, + _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB, + _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE, + _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1, + _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4, + _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7, + _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA, + _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD, + _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0, + _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3, + _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6, + _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9, + _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC, + _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF, + _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2, + _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5, + _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8, + _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB, + _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE, + _MM_PERM_DDDD = 
0xFF +} _MM_PERM_ENUM; + +typedef enum +{ + _MM_MANT_NORM_1_2, /* interval [1, 2) */ + _MM_MANT_NORM_p5_2, /* interval [0.5, 2) */ + _MM_MANT_NORM_p5_1, /* interval [0.5, 1) */ + _MM_MANT_NORM_p75_1p5 /* interval [0.75, 1.5) */ +} _MM_MANTISSA_NORM_ENUM; + +typedef enum +{ + _MM_MANT_SIGN_src, /* sign = sign(SRC) */ + _MM_MANT_SIGN_zero, /* sign = 0 */ + _MM_MANT_SIGN_nan /* DEST = NaN if sign(SRC) = 1 */ +} _MM_MANTISSA_SIGN_ENUM; + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f,evex512"), __min_vector_width__(512))) +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512f,no-evex512"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512f,no-evex512"))) + +/* Create vectors with repeated elements */ + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_setzero_si512(void) +{ + return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 }; +} + +#define _mm512_setzero_epi32 _mm512_setzero_si512 + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_undefined_pd(void) +{ + return (__m512d)__builtin_ia32_undef512(); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_undefined(void) +{ + return (__m512)__builtin_ia32_undef512(); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_undefined_ps(void) +{ + return (__m512)__builtin_ia32_undef512(); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_undefined_epi32(void) +{ + return (__m512i)__builtin_ia32_undef512(); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcastd_epi32 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v4si) __A, (__v4si) __A, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__M, + (__v16si) _mm512_broadcastd_epi32(__A), + (__v16si) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__M, + (__v16si) _mm512_broadcastd_epi32(__A), + (__v16si) _mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcastq_epi64 (__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v2di) __A, (__v2di) __A, + 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di) _mm512_broadcastq_epi64(__A), + (__v8di) __O); + +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di) _mm512_broadcastq_epi64(__A), + (__v8di) _mm512_setzero_si512()); +} + + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_setzero_ps(void) +{ + return __extension__ (__m512){ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }; +} + +#define _mm512_setzero _mm512_setzero_ps + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_setzero_pd(void) +{ + return __extension__ (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_set1_ps(float __w) +{ + return 
__extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_set1_pd(double __w) +{ + return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi8(char __w) +{ + return __extension__ (__m512i)(__v64qi){ + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi16(short __w) +{ + return __extension__ (__m512i)(__v32hi){ + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi32(int __s) +{ + return __extension__ (__m512i)(__v16si){ + __s, __s, __s, __s, __s, __s, __s, __s, + __s, __s, __s, __s, __s, __s, __s, __s }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_set1_epi32(__mmask16 __M, int __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__M, + (__v16si)_mm512_set1_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set1_epi64(long long __d) +{ + return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_set1_epi64(__mmask8 __M, long long __A) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_set1_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_broadcastss_ps(__m128 __A) +{ + return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set4_epi32 (int __A, int __B, int __C, int __D) +{ + return __extension__ (__m512i)(__v16si) + { __D, __C, __B, __A, __D, __C, __B, __A, + __D, __C, __B, __A, __D, __C, __B, __A }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set4_epi64 (long long __A, long long __B, long long __C, + long long __D) +{ + return __extension__ (__m512i) (__v8di) + { __D, __C, __B, __A, __D, __C, __B, __A }; +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_set4_pd (double __A, double __B, double __C, double __D) +{ + return __extension__ (__m512d) + { __D, __C, __B, __A, __D, __C, __B, __A }; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_set4_ps (float __A, float __B, float __C, float __D) +{ + return __extension__ (__m512) + { __D, __C, __B, __A, __D, __C, __B, __A, + __D, __C, __B, __A, __D, __C, __B, __A }; +} + +#define _mm512_setr4_epi32(e0,e1,e2,e3) \ + _mm512_set4_epi32((e3),(e2),(e1),(e0)) + +#define _mm512_setr4_epi64(e0,e1,e2,e3) \ + _mm512_set4_epi64((e3),(e2),(e1),(e0)) + +#define _mm512_setr4_pd(e0,e1,e2,e3) \ + _mm512_set4_pd((e3),(e2),(e1),(e0)) + +#define _mm512_setr4_ps(e0,e1,e2,e3) \ + _mm512_set4_ps((e3),(e2),(e1),(e0)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_broadcastsd_pd(__m128d __A) +{ + return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A, + 0, 0, 0, 0, 0, 0, 0, 0); +} + +/* Cast between vector types */ + +static 
__inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_castpd256_pd512(__m256d __a) +{ + return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0, + 1, 2, 3, 4, 5, 6, 7); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_castps256_ps512(__m256 __a) +{ + return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +static __inline __m128d __DEFAULT_FN_ATTRS512 +_mm512_castpd512_pd128(__m512d __a) +{ + return __builtin_shufflevector(__a, __a, 0, 1); +} + +static __inline __m256d __DEFAULT_FN_ATTRS512 +_mm512_castpd512_pd256 (__m512d __A) +{ + return __builtin_shufflevector(__A, __A, 0, 1, 2, 3); +} + +static __inline __m128 __DEFAULT_FN_ATTRS512 +_mm512_castps512_ps128(__m512 __a) +{ + return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); +} + +static __inline __m256 __DEFAULT_FN_ATTRS512 +_mm512_castps512_ps256 (__m512 __A) +{ + return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_castpd_ps (__m512d __A) +{ + return (__m512) (__A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_castpd_si512 (__m512d __A) +{ + return (__m512i) (__A); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_castpd128_pd512 (__m128d __A) +{ + __m256d __B = __builtin_nondeterministic_value(__B); + return __builtin_shufflevector( + __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3), + __B, 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_castps_pd (__m512 __A) +{ + return (__m512d) (__A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_castps_si512 (__m512 __A) +{ + return (__m512i) (__A); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_castps128_ps512 (__m128 __A) +{ + __m256 __B = __builtin_nondeterministic_value(__B); + return __builtin_shufflevector( + __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7), + __B, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_castsi128_si512 (__m128i __A) +{ + __m256i __B = __builtin_nondeterministic_value(__B); + return __builtin_shufflevector( + __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3), + __B, 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_castsi256_si512 (__m256i __A) +{ + return __builtin_shufflevector( __A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_castsi512_ps (__m512i __A) +{ + return (__m512) (__A); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_castsi512_pd (__m512i __A) +{ + return (__m512d) (__A); +} + +static __inline __m128i __DEFAULT_FN_ATTRS512 +_mm512_castsi512_si128 (__m512i __A) +{ + return (__m128i)__builtin_shufflevector(__A, __A , 0, 1); +} + +static __inline __m256i __DEFAULT_FN_ATTRS512 +_mm512_castsi512_si256 (__m512i __A) +{ + return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_int2mask(int __a) +{ + return (__mmask16)__a; +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_mask2int(__mmask16 __a) +{ + return (int)__a; +} + +/// Constructs a 512-bit floating-point vector of [8 x double] from a +/// 128-bit floating-point vector of [2 x double]. The lower 128 bits +/// contain the value of the source vector. 
The upper 384 bits are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 512-bit floating-point vector of [8 x double]. The lower 128 bits +/// contain the value of the parameter. The upper 384 bits are set to zero. +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_zextpd128_pd512(__m128d __a) +{ + return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3, 2, 3, 2, 3); +} + +/// Constructs a 512-bit floating-point vector of [8 x double] from a +/// 256-bit floating-point vector of [4 x double]. The lower 256 bits +/// contain the value of the source vector. The upper 256 bits are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 512-bit floating-point vector of [8 x double]. The lower 256 bits +/// contain the value of the parameter. The upper 256 bits are set to zero. +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_zextpd256_pd512(__m256d __a) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)_mm256_setzero_pd(), 0, 1, 2, 3, 4, 5, 6, 7); +} + +/// Constructs a 512-bit floating-point vector of [16 x float] from a +/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain +/// the value of the source vector. The upper 384 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 512-bit floating-point vector of [16 x float]. The lower 128 bits +/// contain the value of the parameter. The upper 384 bits are set to zero. +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_zextps128_ps512(__m128 __a) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7); +} + +/// Constructs a 512-bit floating-point vector of [16 x float] from a +/// 256-bit floating-point vector of [8 x float]. The lower 256 bits contain +/// the value of the source vector. The upper 256 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 512-bit floating-point vector of [16 x float]. The lower 256 bits +/// contain the value of the parameter. The upper 256 bits are set to zero. +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_zextps256_ps512(__m256 __a) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)_mm256_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +/// Constructs a 512-bit integer vector from a 128-bit integer vector. +/// The lower 128 bits contain the value of the source vector. The upper +/// 384 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 512-bit integer vector. The lower 128 bits contain the value of +/// the parameter. The upper 384 bits are set to zero. +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_zextsi128_si512(__m128i __a) +{ + return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3, 2, 3, 2, 3); +} + +/// Constructs a 512-bit integer vector from a 256-bit integer vector. +/// The lower 256 bits contain the value of the source vector. The upper +/// 256 bits are set to zero. 
+/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 512-bit integer vector. The lower 256 bits contain the value of +/// the parameter. The upper 256 bits are set to zero. +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_zextsi256_si512(__m256i __a) +{ + return __builtin_shufflevector((__v4di)__a, (__v4di)_mm256_setzero_si256(), 0, 1, 2, 3, 4, 5, 6, 7); +} + +/* Bitwise operators */ +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_and_epi32(__m512i __a, __m512i __b) +{ + return (__m512i)((__v16su)__a & (__v16su)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k, + (__v16si) _mm512_and_epi32(__a, __b), + (__v16si) __src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (), + __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_and_epi64(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a & (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k, + (__v8di) _mm512_and_epi64(__a, __b), + (__v8di) __src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (), + __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_andnot_si512 (__m512i __A, __m512i __B) +{ + return (__m512i)(~(__v8du)__A & (__v8du)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_andnot_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i)(~(__v16su)__A & (__v16su)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_andnot_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(), + __U, __A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_andnot_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)(~(__v8du)__A & (__v8du)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_andnot_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(), + __U, __A, __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_or_epi32(__m512i __a, __m512i __b) +{ + return (__m512i)((__v16su)__a | (__v16su)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k, + (__v16si)_mm512_or_epi32(__a, __b), + (__v16si)__src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 
+_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_or_epi64(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a | (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k, + (__v8di)_mm512_or_epi64(__a, __b), + (__v8di)__src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_xor_epi32(__m512i __a, __m512i __b) +{ + return (__m512i)((__v16su)__a ^ (__v16su)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k, + (__v16si)_mm512_xor_epi32(__a, __b), + (__v16si)__src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_xor_epi64(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a ^ (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k, + (__v8di)_mm512_xor_epi64(__a, __b), + (__v8di)__src); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_and_si512(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a & (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_or_si512(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a | (__v8du)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_xor_si512(__m512i __a, __m512i __b) +{ + return (__m512i)((__v8du)__a ^ (__v8du)__b); +} + +/* Arithmetic */ + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_add_pd(__m512d __a, __m512d __b) +{ + return (__m512d)((__v8df)__a + (__v8df)__b); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_add_ps(__m512 __a, __m512 __b) +{ + return (__m512)((__v16sf)__a + (__v16sf)__b); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_mul_pd(__m512d __a, __m512d __b) +{ + return (__m512d)((__v8df)__a * (__v8df)__b); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_mul_ps(__m512 __a, __m512 __b) +{ + return (__m512)((__v16sf)__a * (__v16sf)__b); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_sub_pd(__m512d __a, __m512d __b) +{ + return (__m512d)((__v8df)__a - (__v8df)__b); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_sub_ps(__m512 __a, __m512 __b) +{ + return (__m512)((__v16sf)__a - (__v16sf)__b); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_add_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A + (__v8du) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + 
return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_add_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_add_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sub_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8du) __A - (__v8du) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sub_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sub_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_add_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A + (__v16su) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_add_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_add_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sub_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A - (__v16su) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sub_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sub_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +#define _mm512_max_round_pd(A, B, R) \ + ((__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R))) + +#define _mm512_mask_max_round_pd(W, U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_max_round_pd((A), (B), (R)), \ + (__v8df)(W))) + +#define _mm512_maskz_max_round_pd(U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_max_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd())) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_max_pd(__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_maxpd512((__v8df) __A, (__v8df) __B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_max_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_max_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_max_round_ps(A, B, 
R) \ + ((__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R))) + +#define _mm512_mask_max_round_ps(W, U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_max_round_ps((A), (B), (R)), \ + (__v16sf)(W))) + +#define _mm512_maskz_max_round_ps(U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_max_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps())) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_max_ps(__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_maxps512((__v16sf) __A, (__v16sf) __B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_max_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_max_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_max_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_max_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm_maskz_max_round_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_max_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_max_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_max_round_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline __m512i +__DEFAULT_FN_ATTRS512 +_mm512_max_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_max((__v16si)__A, (__v16si)__B); +} + +static 
__inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epu32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_max((__v16su)__A, (__v16su)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epu32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_max_epu32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_max((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_max_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_max_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_max_epu64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_max((__v8du)__A, (__v8du)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_max_epu64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_max_epu64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_min_round_pd(A, B, R) \ + ((__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R))) + +#define _mm512_mask_min_round_pd(W, U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_min_round_pd((A), (B), (R)), \ + (__v8df)(W))) + +#define _mm512_maskz_min_round_pd(U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_min_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd())) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_min_pd(__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_minpd512((__v8df) __A, (__v8df) __B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_min_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_pd (__mmask8 __U, __m512d 
__A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_min_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_min_round_ps(A, B, R) \ + ((__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R))) + +#define _mm512_mask_min_round_ps(W, U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_min_round_ps((A), (B), (R)), \ + (__v16sf)(W))) + +#define _mm512_maskz_min_round_ps(U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_min_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps())) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_min_ps(__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_minps512((__v16sf) __A, (__v16sf) __B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_min_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_min_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_min_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_min_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm_maskz_min_round_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_min_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_min_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_min_round_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + 
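/* Editorial usage sketch, not part of the vendored header: the _mask_ forms
 * defined above merge with `src` wherever a mask bit is clear, while the
 * _maskz_ forms zero those lanes instead. Assumes an AVX-512F target; the
 * helper names below are invented for illustration only. */
#include <immintrin.h>

static inline __m512 min_merge(__m512 src, __mmask16 k, __m512 a, __m512 b)
{
    return _mm512_mask_min_ps(src, k, a, b);   /* lanes with k==0 keep src */
}

static inline __m512 min_zero(__mmask16 k, __m512 a, __m512 b)
{
    return _mm512_maskz_min_ps(k, a, b);       /* lanes with k==0 become 0.0f */
}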
+static __inline __m512i +__DEFAULT_FN_ATTRS512 +_mm512_min_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_min((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_min_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_min_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epu32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_min((__v16su)__A, (__v16su)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_min_epu32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_min_epu32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_min((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_min_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_min_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_min_epu64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_elementwise_min((__v8du)__A, (__v8du)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_min_epu64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_min_epu64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mul_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epi32(__X, __Y), + (__v8di)__W); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epi32(__X, __Y), + (__v8di)_mm512_setzero_si512 ()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mul_epu32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y); +} + 
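/* Editorial usage sketch, not part of the vendored header: _mm512_mul_epu32
 * multiplies the even-indexed unsigned 32-bit lanes of each operand and
 * widens the products to 64 bits (PMULUDQ-style), so sixteen u32 inputs
 * yield eight u64 products. Assumes an AVX-512F target; the helper name is
 * invented for illustration only. */
#include <immintrin.h>
#include <stdint.h>

static inline __m512i widening_umul_even_lanes(const uint32_t *x,
                                               const uint32_t *y)
{
    __m512i a = _mm512_loadu_si512((const void *)x);  /* 16 x u32 */
    __m512i b = _mm512_loadu_si512((const void *)y);
    return _mm512_mul_epu32(a, b);                    /* 8 x u64 products */
}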
+static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epu32(__X, __Y), + (__v8di)__W); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_mul_epu32(__X, __Y), + (__v8di)_mm512_setzero_si512 ()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mullo_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16su) __A * (__v16su) __B); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_mullo_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_mullo_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mullox_epi64 (__m512i __A, __m512i __B) { + return (__m512i) ((__v8du) __A * (__v8du) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_mullox_epi64(__A, __B), + (__v8di)__W); +} + +#define _mm512_sqrt_round_pd(A, R) \ + ((__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R))) + +#define _mm512_mask_sqrt_round_pd(W, U, A, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_sqrt_round_pd((A), (R)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_sqrt_round_pd(U, A, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_sqrt_round_pd((A), (R)), \ + (__v8df)_mm512_setzero_pd())) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_sqrt_pd(__m512d __A) +{ + return (__m512d)__builtin_ia32_sqrtpd512((__v8df)__A, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_sqrt_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_sqrt_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_sqrt_round_ps(A, R) \ + ((__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R))) + +#define _mm512_mask_sqrt_round_ps(W, U, A, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_sqrt_round_ps((A), (R)), \ + (__v16sf)(__m512)(W))) + +#define _mm512_maskz_sqrt_round_ps(U, A, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_sqrt_round_ps((A), (R)), \ + (__v16sf)_mm512_setzero_ps())) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_sqrt_ps(__m512 __A) +{ + return (__m512)__builtin_ia32_sqrtps512((__v16sf)__A, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_sqrt_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 
+_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_sqrt_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_rsqrt14_pd(__m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1);} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_rsqrt14_ps(__m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_rsqrt14_ss(__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_rsqrt14_sd(__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_rcp14_pd(__m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A, + (__v8df) + 
_mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_rcp14_ps(__m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_rcp14_ss(__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_rcp14_sd(__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_floor_ps(__m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_FLOOR, + (__v16sf) __A, (unsigned short)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_FLOOR, + (__v16sf) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_floor_pd(__m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_FLOOR, + (__v8df) __A, (unsigned char)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_FLOOR, + (__v8df) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_CEIL, + (__v16sf) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_ceil_ps(__m512 __A) +{ + return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, + _MM_FROUND_CEIL, + (__v16sf) __A, (unsigned short)-1, + 
_MM_FROUND_CUR_DIRECTION); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_ceil_pd(__m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_CEIL, + (__v8df) __A, (unsigned char)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, + _MM_FROUND_CEIL, + (__v8df) __W, __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_abs_epi64(__m512i __A) +{ + return (__m512i)__builtin_elementwise_abs((__v8di)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_abs_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_abs_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_abs_epi32(__m512i __A) +{ + return (__m512i)__builtin_elementwise_abs((__v16si) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_abs_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_abs_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_add_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_add_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps()); +} + +#define _mm_add_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_add_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm_maskz_add_round_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_add_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_add_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd()); +} +#define _mm_add_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_add_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_add_round_sd(U, 
A, B, R) \ + ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_add_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_add_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_add_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_add_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_add_round_pd(A, B, R) \ + ((__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R))) + +#define _mm512_mask_add_round_pd(W, U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_add_round_pd((A), (B), (R)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_add_round_pd(U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_add_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_add_round_ps(A, B, R) \ + ((__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R))) + +#define _mm512_mask_add_round_ps(W, U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_add_round_ps((A), (B), (R)), \ + (__v16sf)(__m512)(W))) + +#define _mm512_maskz_add_round_ps(U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_add_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps())) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_sub_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_sub_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps()); +} +#define _mm_sub_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_sub_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm_maskz_sub_round_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_sub_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_sub_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd()); +} 
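/* Editorial usage sketch, not part of the vendored header: the *_round_*
 * macros above take an explicit static rounding/exception-suppression
 * operand instead of reading MXCSR; the operand must be a compile-time
 * constant. Assumes an AVX-512F target; the helper name is invented for
 * illustration only. */
#include <immintrin.h>

static inline __m512d add_round_to_nearest(__m512d a, __m512d b)
{
    /* Round to nearest-even with floating-point exceptions suppressed. */
    return _mm512_add_round_pd(a, b,
                               _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}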
+ +#define _mm_sub_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_sub_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_sub_round_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_sub_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_sub_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_sub_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_sub_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_sub_round_pd(A, B, R) \ + ((__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R))) + +#define _mm512_mask_sub_round_pd(W, U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_sub_round_pd((A), (B), (R)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_sub_round_pd(U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_sub_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_sub_round_ps(A, B, R) \ + ((__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R))) + +#define _mm512_mask_sub_round_ps(W, U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \ + (__v16sf)(__m512)(W))) + +#define _mm512_maskz_sub_round_ps(U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps())) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_mul_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_mul_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps()); +} +#define _mm_mul_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_mul_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm_maskz_mul_round_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), 
(int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_mul_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_mul_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd()); +} + +#define _mm_mul_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_mul_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_mul_round_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_mul_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_mul_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_mul_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_mul_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_mul_round_pd(A, B, R) \ + ((__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R))) + +#define _mm512_mask_mul_round_pd(W, U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_mul_round_pd((A), (B), (R)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_mul_round_pd(U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_mul_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_mul_round_ps(A, B, R) \ + ((__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R))) + +#define _mm512_mask_mul_round_ps(W, U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \ + (__v16sf)(__m512)(W))) + +#define _mm512_maskz_mul_round_ps(U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps())) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_div_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) { + __A = _mm_div_ss(__A, __B); + return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps()); +} + +#define _mm_div_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + 
(__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_div_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm_maskz_div_round_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_div_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) { + __A = _mm_div_sd(__A, __B); + return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd()); +} + +#define _mm_div_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_div_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_div_round_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_div_pd(__m512d __a, __m512d __b) +{ + return (__m512d)((__v8df)__a/(__v8df)__b); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_div_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_div_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_div_ps(__m512 __a, __m512 __b) +{ + return (__m512)((__v16sf)__a/(__v16sf)__b); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_div_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_div_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +#define _mm512_div_round_pd(A, B, R) \ + ((__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(R))) + +#define _mm512_mask_div_round_pd(W, U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_div_round_pd((A), (B), (R)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_div_round_pd(U, A, B, R) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_div_round_pd((A), (B), (R)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_div_round_ps(A, B, R) \ + ((__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(R))) + +#define _mm512_mask_div_round_ps(W, U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_div_round_ps((A), (B), (R)), \ + (__v16sf)(__m512)(W))) + +#define 
_mm512_maskz_div_round_ps(U, A, B, R) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_div_round_ps((A), (B), (R)), \ + (__v16sf)_mm512_setzero_ps())) + +#define _mm512_roundscale_ps(A, B) \ + ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_roundscale_ps(A, B, C, imm) \ + ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \ + (__v16sf)(__m512)(A), (__mmask16)(B), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_roundscale_ps(A, B, imm) \ + ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(A), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \ + ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \ + (__v16sf)(__m512)(A), (__mmask16)(B), \ + (int)(R))) + +#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \ + ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(A), (int)(R))) + +#define _mm512_roundscale_round_ps(A, imm, R) \ + ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_roundscale_pd(A, B) \ + ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_roundscale_pd(A, B, C, imm) \ + ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \ + (__v8df)(__m512d)(A), (__mmask8)(B), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_roundscale_pd(A, B, imm) \ + ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(A), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \ + ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \ + (__v8df)(__m512d)(A), (__mmask8)(B), \ + (int)(R))) + +#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \ + ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(A), (int)(R))) + +#define _mm512_roundscale_round_pd(A, imm, R) \ + ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_fmadd_round_pd(A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R))) + + +#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_fmsub_round_pd(A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + 
-(__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R))) + + +#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_fnmadd_round_pd(A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R))) + + +#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_fnmsub_round_pd(A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R))) + + +#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) 
__builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + -(__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + -(__v8df) __B, + -(__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmadd_round_ps(A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R))) + + +#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_fmsub_round_ps(A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R))) + + +#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_fnmadd_round_ps(A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + -(__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R))) + + +#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_fnmsub_round_ps(A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + -(__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R))) + + +#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \ + 
((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + -(__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + -(__v16sf) __B, + -(__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmaddsub_round_pd(A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + 
(__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R))) + + +#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \ + ((__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_fmsubadd_round_pd(A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)-1, (int)(R))) + + +#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A, + (__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmaddsub_round_ps(A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R))) + + +#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \ 
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \ + ((__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_fmsubadd_round_ps(A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)-1, (int)(R))) + + +#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A, + (__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \ + ((__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d)__builtin_ia32_vfmsubpd512_mask3 ((__v8df) 
__A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \ + ((__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512)__builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \ + ((__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \ + ((__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512)__builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + -(__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + -(__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + -(__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + -(__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \ + ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \ + -(__v8df)(__m512d)(B), \ + -(__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \ + ((__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(C), \ + (__mmask8)(U), (int)(R))) + + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C) +{ + return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A, + -(__v8df) __B, + -(__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U) +{ + return (__m512d) __builtin_ia32_vfmsubpd512_mask3 (-(__v8df) __A, + (__v8df) __B, + (__v8df) __C, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define 
_mm512_mask_fnmsub_round_ps(A, U, B, C, R) \ + ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \ + -(__v16sf)(__m512)(B), \ + -(__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \ + ((__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(C), \ + (__mmask16)(U), (int)(R))) + + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C) +{ + return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A, + -(__v16sf) __B, + -(__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U) +{ + return (__m512) __builtin_ia32_vfmsubps512_mask3 (-(__v16sf) __A, + (__v16sf) __B, + (__v16sf) __C, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + + + +/* Vector permutations */ + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpermi2vard512((__v16si)__A, (__v16si) __I, + (__v16si) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_epi32(__m512i __A, __mmask16 __U, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_permutex2var_epi32(__A, __I, __B), + (__v16si)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_epi32(__m512i __A, __m512i __I, __mmask16 __U, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_permutex2var_epi32(__A, __I, __B), + (__v16si)__I); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_epi32(__mmask16 __U, __m512i __A, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_permutex2var_epi32(__A, __I, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpermi2varq512((__v8di)__A, (__v8di) __I, + (__v8di) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_epi64(__m512i __A, __mmask8 __U, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_permutex2var_epi64(__A, __I, __B), + (__v8di)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I, __mmask8 __U, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_permutex2var_epi64(__A, __I, __B), + (__v8di)__I); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_permutex2var_epi64(__A, __I, __B), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_alignr_epi64(A, B, I) \ + ((__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(I))) + +#define _mm512_mask_alignr_epi64(W, U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \ + (__v8di)(__m512i)(W))) + +#define _mm512_maskz_alignr_epi64(U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \ + 
(__v8di)_mm512_setzero_si512())) + +#define _mm512_alignr_epi32(A, B, I) \ + ((__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(I))) + +#define _mm512_mask_alignr_epi32(W, U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \ + (__v16si)(__m512i)(W))) + +#define _mm512_maskz_alignr_epi32(U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512())) +/* Vector Extract */ + +#define _mm512_extractf64x4_pd(A, I) \ + ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \ + (__v4df)_mm256_undefined_pd(), \ + (__mmask8)-1)) + +#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \ + ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_extractf64x4_pd(U, A, imm) \ + ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm512_extractf32x4_ps(A, I) \ + ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v4sf)_mm_undefined_ps(), \ + (__mmask8)-1)) + +#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \ + ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_extractf32x4_ps(U, A, imm) \ + ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U))) + +/* Vector Blend */ + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W) +{ + return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U, + (__v8df) __W, + (__v8df) __A); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W) +{ + return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U, + (__v16sf) __W, + (__v16sf) __A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U, + (__v8di) __W, + (__v8di) __A); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W) +{ + return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U, + (__v16si) __W, + (__v16si) __A); +} + +/* Compare */ + +#define _mm512_cmp_round_ps_mask(A, B, P, R) \ + ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(P), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \ + ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(P), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_cmp_ps_mask(A, B, P) \ + _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) +#define _mm512_mask_cmp_ps_mask(U, A, B, P) \ + _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_cmpeq_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_EQ_OQ) +#define _mm512_mask_cmpeq_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_EQ_OQ) + +#define _mm512_cmplt_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_LT_OS) +#define _mm512_mask_cmplt_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LT_OS) + +#define _mm512_cmple_ps_mask(A, 
B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_LE_OS) +#define _mm512_mask_cmple_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LE_OS) + +#define _mm512_cmpunord_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_UNORD_Q) +#define _mm512_mask_cmpunord_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_UNORD_Q) + +#define _mm512_cmpneq_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_NEQ_UQ) +#define _mm512_mask_cmpneq_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NEQ_UQ) + +#define _mm512_cmpnlt_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_NLT_US) +#define _mm512_mask_cmpnlt_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLT_US) + +#define _mm512_cmpnle_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_NLE_US) +#define _mm512_mask_cmpnle_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLE_US) + +#define _mm512_cmpord_ps_mask(A, B) \ + _mm512_cmp_ps_mask((A), (B), _CMP_ORD_Q) +#define _mm512_mask_cmpord_ps_mask(k, A, B) \ + _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q) + +#define _mm512_cmp_round_pd_mask(A, B, P, R) \ + ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(P), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \ + ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(P), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_cmp_pd_mask(A, B, P) \ + _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) +#define _mm512_mask_cmp_pd_mask(U, A, B, P) \ + _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_cmpeq_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_EQ_OQ) +#define _mm512_mask_cmpeq_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_EQ_OQ) + +#define _mm512_cmplt_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_LT_OS) +#define _mm512_mask_cmplt_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LT_OS) + +#define _mm512_cmple_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_LE_OS) +#define _mm512_mask_cmple_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LE_OS) + +#define _mm512_cmpunord_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_UNORD_Q) +#define _mm512_mask_cmpunord_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_UNORD_Q) + +#define _mm512_cmpneq_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_NEQ_UQ) +#define _mm512_mask_cmpneq_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NEQ_UQ) + +#define _mm512_cmpnlt_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_NLT_US) +#define _mm512_mask_cmpnlt_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLT_US) + +#define _mm512_cmpnle_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_NLE_US) +#define _mm512_mask_cmpnle_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLE_US) + +#define _mm512_cmpord_pd_mask(A, B) \ + _mm512_cmp_pd_mask((A), (B), _CMP_ORD_Q) +#define _mm512_mask_cmpord_pd_mask(k, A, B) \ + _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_ORD_Q) + +/* Conversion */ + +#define _mm512_cvtt_roundps_epu32(A, R) \ + ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_undefined_epi32(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R))) + 
+#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \ + ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R))) + + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttps_epu32(__m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepi32_ps(A, R) \ + ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \ + ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \ + ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_cvt_roundepu32_ps(A, R) \ + ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \ + ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \ + ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R))) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_cvtepu32_ps (__m512i __A) +{ + return (__m512)__builtin_convertvector((__v16su)__A, __v16sf); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_cvtepu32_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_cvtepu32_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_pd(__m256i __A) +{ + return (__m512d)__builtin_convertvector((__v8si)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepi32_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepi32_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32lo_pd(__m512i __A) +{ + return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 
__U,__m512i __A) +{ + return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A)); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_ps (__m512i __A) +{ + return (__m512)__builtin_convertvector((__v16si)__A, __v16sf); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_cvtepi32_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_cvtepi32_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepu32_pd(__m256i __A) +{ + return (__m512d)__builtin_convertvector((__v8su)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepu32_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_cvtepu32_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtepu32lo_pd(__m512i __A) +{ + return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A) +{ + return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A)); +} + +#define _mm512_cvt_roundpd_ps(A, R) \ + ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \ + ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \ + (__v8sf)(__m256)(W), (__mmask8)(U), \ + (int)(R))) + +#define _mm512_maskz_cvt_roundpd_ps(U, A, R) \ + ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_ps (__m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) _mm256_undefined_ps (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A) +{ + return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, + (__v8sf) _mm256_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_pslo (__m512d __A) +{ + return (__m512) __builtin_shufflevector((__v8sf) _mm512_cvtpd_ps(__A), + (__v8sf) _mm256_setzero_ps (), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A) +{ + return (__m512) __builtin_shufflevector ( + (__v8sf) _mm512_mask_cvtpd_ps (_mm512_castps512_ps256(__W), + __U, __A), + (__v8sf) _mm256_setzero_ps (), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 
15); +} + +#define _mm512_cvt_roundps_ph(A, I) \ + ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)_mm256_undefined_si256(), \ + (__mmask16)-1)) + +#define _mm512_mask_cvt_roundps_ph(U, W, A, I) \ + ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)(__m256i)(U), \ + (__mmask16)(W))) + +#define _mm512_maskz_cvt_roundps_ph(W, A, I) \ + ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \ + (__v16hi)_mm256_setzero_si256(), \ + (__mmask16)(W))) + +#define _mm512_cvtps_ph _mm512_cvt_roundps_ph +#define _mm512_mask_cvtps_ph _mm512_mask_cvt_roundps_ph +#define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph + +#define _mm512_cvt_roundph_ps(A, R) \ + ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_cvt_roundph_ps(W, U, A, R) \ + ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundph_ps(U, A, R) \ + ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R))) + + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_cvtph_ps(__m256i __A) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A) +{ + return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundpd_epi32(A, R) \ + ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \ + ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \ + ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R))) + +static __inline __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvttpd_epi32(__m512d __a) +{ + return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a, + (__v8si)_mm256_setzero_si256(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, + (__v8si) _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundps_epi32(A, R) \ + ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \ + 
((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \ + ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R))) + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttps_epi32(__m512 __a) +{ + return (__m512i) + __builtin_ia32_cvttps2dq512_mask((__v16sf) __a, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) -1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, + (__v16si) _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epi32(A, R) \ + ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundps_epi32(U, A, R) \ + ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtps_epi32 (__m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) _mm512_undefined_epi32 (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epi32(A, R) \ + ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \ + ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \ + ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_epi32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) 
__builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundps_epu32(A, R) \ + ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \ + ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)(__m512i)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundps_epu32(U, A, R) \ + ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtps_epu32 ( __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,\ + (__v16si)\ + _mm512_undefined_epi32 (), + (__mmask16) -1,\ + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_epu32 ( __mmask16 __U, __m512 __A) +{ + return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U , + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundpd_epu32(A, R) \ + ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \ + ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \ + ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtpd_epu32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_cvtsd_f64(__m512d __a) +{ + return __a[0]; +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_cvtss_f32(__m512 __a) +{ + return __a[0]; +} + +/* Unpack and Interleave */ + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_pd(__m512d __a, __m512d __b) +{ + return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b, + 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpackhi_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d 
__A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpackhi_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_pd(__m512d __a, __m512d __b) +{ + return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b, + 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpacklo_pd(__A, __B), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U, + (__v8df)_mm512_unpacklo_pd(__A, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_ps(__m512 __a, __m512 __b) +{ + return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b, + 2, 18, 3, 19, + 2+4, 18+4, 3+4, 19+4, + 2+8, 18+8, 3+8, 19+8, + 2+12, 18+12, 3+12, 19+12); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpackhi_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpackhi_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_ps(__m512 __a, __m512 __b) +{ + return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b, + 0, 16, 1, 17, + 0+4, 16+4, 1+4, 17+4, + 0+8, 16+8, 1+8, 17+8, + 0+12, 16+12, 1+12, 17+12); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpacklo_ps(__A, __B), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16) __U, + (__v16sf)_mm512_unpacklo_ps(__A, __B), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B, + 2, 18, 3, 19, + 2+4, 18+4, 3+4, 19+4, + 2+8, 18+8, 3+8, 19+8, + 2+12, 18+12, 3+12, 19+12); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpackhi_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpackhi_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_epi32(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B, + 0, 16, 1, 17, + 0+4, 16+4, 1+4, 17+4, + 0+8, 16+8, 1+8, 17+8, + 0+12, 16+12, 1+12, 17+12); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 
+_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpacklo_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U, + (__v16si)_mm512_unpacklo_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpackhi_epi64(__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B, + 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpackhi_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpackhi_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_unpacklo_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B, + 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpacklo_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U, + (__v8di)_mm512_unpacklo_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + + +/* SIMD load ops */ + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_si512 (void const *__P) +{ + struct __loadu_si512 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_si512*)__P)->__v; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_epi32 (void const *__P) +{ + struct __loadu_epi32 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi32*)__P)->__v; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P, + (__v16si) __W, + (__mmask16) __U); +} + + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_loadu_epi64 (void const *__P) +{ + struct __loadu_epi64 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi64*)__P)->__v; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P) +{ + return (__m512i) 
__builtin_ia32_loaddqudi512_mask ((const long long *)__P, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_loadu_pd(void const *__p) +{ + struct __loadu_pd { + __m512d_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_pd*)__p)->__v; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_loadu_ps(void const *__p) +{ + struct __loadu_ps { + __m512_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ps*)__p)->__v; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_load_ps(void const *__p) +{ + return *(const __m512*)__p; +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_load_ps(__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_load_pd(void const *__p) +{ + return *(const __m512d*)__p; +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_load_pd(__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_load_si512 (void const *__P) +{ + return *(const __m512i *) __P; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_load_epi32 (void const *__P) +{ + return *(const __m512i *) __P; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_load_epi64 (void const *__P) +{ + return *(const __m512i *) __P; +} + +/* SIMD store ops */ + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_epi64 (void *__P, __m512i __A) +{ + struct __storeu_epi64 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi64*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 
+_mm512_storeu_si512 (void *__P, __m512i __A) +{ + struct __storeu_si512 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si512*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_epi32 (void *__P, __m512i __A) +{ + struct __storeu_epi32 { + __m512i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi32*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_pd(void *__P, __m512d __A) +{ + struct __storeu_pd { + __m512d_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_storeu_ps(void *__P, __m512 __A) +{ + struct __storeu_ps { + __m512_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__P)->__v = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_pd(void *__P, __m512d __A) +{ + *(__m512d*)__P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_ps(void *__P, __m512 __A) +{ + *(__m512*)__P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_si512 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_epi32 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +static __inline void __DEFAULT_FN_ATTRS512 +_mm512_store_epi64 (void *__P, __m512i __A) +{ + *(__m512i *) __P = __A; +} + +/* Mask ops */ + +static __inline __mmask16 __DEFAULT_FN_ATTRS +_mm512_knot(__mmask16 __M) +{ + return __builtin_ia32_knothi(__M); +} + +/* Integer compare */ + +#define _mm512_cmpeq_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi32_mask(k, A, B) \ + 
_mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi32_mask(A, B) \ + _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi32_mask(k, A, B) \ + _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epu32_mask(A, B) \ + _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu32_mask(k, A, B) \ + _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) +#define _mm512_mask_cmplt_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epi64_mask(A, B) \ + _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epi64_mask(k, A, B) \ + _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm512_cmpeq_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm512_mask_cmpeq_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm512_cmpge_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) +#define _mm512_mask_cmpge_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm512_cmpgt_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) +#define _mm512_mask_cmpgt_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm512_cmple_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) +#define _mm512_mask_cmple_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm512_cmplt_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) +#define 
_mm512_mask_cmplt_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm512_cmpneq_epu64_mask(A, B) \ + _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) +#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \ + _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi8_epi32(__m128i __A) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi8_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi8_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi8_epi64(__m128i __A) +{ + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__A, (__v16qs)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi8_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi8_epi64(__A), + (__v8di)_mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_epi64(__m256i __X) +{ + return (__m512i)__builtin_convertvector((__v8si)__X, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi32_epi64(__X), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi32_epi64(__X), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi16_epi32(__m256i __A) +{ + return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi16_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepi16_epi32(__A), + (__v16si)_mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi16_epi64(__m128i __A) +{ + return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + 
(__v8di)_mm512_cvtepi16_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepi16_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu8_epi32(__m128i __A) +{ + return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu8_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu8_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu8_epi64(__m128i __A) +{ + return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__A, (__v16qu)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu8_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu8_epi64(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu32_epi64(__m256i __X) +{ + return (__m512i)__builtin_convertvector((__v8su)__X, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu32_epi64(__X), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu32_epi64(__X), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu16_epi32(__m256i __A) +{ + return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu16_epi32(__A), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_cvtepu16_epi32(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtepu16_epi64(__m128i __A) +{ + return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu16_epi64(__A), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_cvtepu16_epi64(__A), + 
(__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_rorv_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_prorvd512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_rorv_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_rorv_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_rorv_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_prorvq512((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_rorv_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_rorv_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + + + +#define _mm512_cmp_epi32_mask(a, b, p) \ + ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)-1)) + +#define _mm512_cmp_epu32_mask(a, b, p) \ + ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)-1)) + +#define _mm512_cmp_epi64_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm512_cmp_epu64_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm512_mask_cmp_epi32_mask(m, a, b, p) \ + ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)(m))) + +#define _mm512_mask_cmp_epu32_mask(m, a, b, p) \ + ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \ + (__v16si)(__m512i)(b), (int)(p), \ + (__mmask16)(m))) + +#define _mm512_mask_cmp_epi64_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm512_mask_cmp_epu64_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \ + (__v8di)(__m512i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm512_rol_epi32(a, b) \ + ((__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b))) + +#define _mm512_mask_rol_epi32(W, U, a, b) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_rol_epi32((a), (b)), \ + (__v16si)(__m512i)(W))) + +#define _mm512_maskz_rol_epi32(U, a, b) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_rol_epi32((a), (b)), \ + (__v16si)_mm512_setzero_si512())) + +#define _mm512_rol_epi64(a, b) \ + ((__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b))) + +#define _mm512_mask_rol_epi64(W, U, a, b) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_rol_epi64((a), (b)), \ + (__v8di)(__m512i)(W))) + +#define _mm512_maskz_rol_epi64(U, a, b) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + 
(__v8di)_mm512_rol_epi64((a), (b)), \ + (__v8di)_mm512_setzero_si512())) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_rolv_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_prolvd512((__v16si)__A, (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_rolv_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_rolv_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_rolv_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_prolvq512((__v8di)__A, (__v8di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_rolv_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_rolv_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_ror_epi32(A, B) \ + ((__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B))) + +#define _mm512_mask_ror_epi32(W, U, A, B) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_ror_epi32((A), (B)), \ + (__v16si)(__m512i)(W))) + +#define _mm512_maskz_ror_epi32(U, A, B) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_ror_epi32((A), (B)), \ + (__v16si)_mm512_setzero_si512())) + +#define _mm512_ror_epi64(A, B) \ + ((__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B))) + +#define _mm512_mask_ror_epi64(W, U, A, B) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_ror_epi64((A), (B)), \ + (__v8di)(__m512i)(W))) + +#define _mm512_maskz_ror_epi64(U, A, B) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_ror_epi64((A), (B)), \ + (__v8di)_mm512_setzero_si512())) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_slli_epi32(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, (int)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_slli_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_slli_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_slli_epi64(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, (int)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_slli_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B) 
+{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_slli_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srli_epi32(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, (int)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srli_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srli_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srli_epi64(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, (int)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srli_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srli_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U, + (__v16si) __A, + (__v16si) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U, + (__v16si) __A, + (__v16si) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U, + (__v8di) __A, + (__v8di) __W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U, + (__v8di) __A, + (__v8di) _mm512_setzero_si512 ()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) 
__P, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_movedup_pd (__m512d __A) +{ + return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A, + 0, 0, 2, 2, 4, 4, 6, 6); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_movedup_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_movedup_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +#define _mm512_fixupimm_round_pd(A, B, C, imm, R) \ + ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \ + ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_fixupimm_pd(A, B, C, imm) \ + ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \ + ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \ + ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), \ + (int)(imm), (__mmask8)(U), \ + (int)(R))) + +#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \ + ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8di)(__m512i)(C), \ + (int)(imm), (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_fixupimm_round_ps(A, B, C, imm, R) \ + ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \ + ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_fixupimm_ps(A, B, C, imm) \ + ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \ + ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), (int)(imm), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \ + ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), \ + (int)(imm), (__mmask16)(U), \ + (int)(R))) + +#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \ + 
((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16si)(__m512i)(C), \ + (int)(imm), (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_fixupimm_round_sd(A, B, C, imm, R) \ + ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \ + ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R))) + +#define _mm_fixupimm_sd(A, B, C, imm) \ + ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \ + ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \ + ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \ + ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_fixupimm_round_ss(A, B, C, imm, R) \ + ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \ + ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R))) + +#define _mm_fixupimm_ss(A, B, C, imm) \ + ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \ + ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \ + ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \ + ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_getexp_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_getexp_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A, + (__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + 
_MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_getexp_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_getexp_round_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +#define _mm_getexp_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_getexp_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, (__v4sf) _mm_setzero_ps(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_getexp_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_getexp_round_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +#define _mm_getmant_round_sd(A, B, C, D, R) \ + ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_getmant_sd(A, B, C, D) \ + ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getmant_sd(W, U, A, B, C, D) \ + ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \ + ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_getmant_sd(U, A, B, C, D) \ + ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \ + ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (int)(((D)<<2) | (C)), \ + 
(__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +#define _mm_getmant_round_ss(A, B, C, D, R) \ + ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_getmant_ss(A, B, C, D) \ + ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getmant_ss(W, U, A, B, C, D) \ + ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \ + ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_getmant_ss(U, A, B, C, D) \ + ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \ + ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (int)(((D)<<2) | (C)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kmov (__mmask16 __A) +{ + return __A; +} + +#define _mm_comi_round_sd(A, B, P, R) \ + ((int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \ + (int)(P), (int)(R))) + +#define _mm_comi_round_ss(A, B, P, R) \ + ((int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \ + (int)(P), (int)(R))) + +#ifdef __x86_64__ +#define _mm_cvt_roundsd_si64(A, R) \ + ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))) +#endif + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sll_epi32(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sll_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sll_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sll_epi64(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sll_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sll_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sllv_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psllv16si((__v16si)__X, (__v16si)__Y); +} + +static 
__inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sllv_epi32(__X, __Y), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sllv_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sllv_epi64(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psllv8di((__v8di)__X, (__v8di)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sllv_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sllv_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sra_epi32(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sra_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_sra_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_sra_epi64(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sra_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_sra_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srav_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrav16si((__v16si)__X, (__v16si)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srav_epi32(__X, __Y), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srav_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srav_epi64(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrav8di((__v8di)__X, (__v8di)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return 
(__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srav_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srav_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srl_epi32(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srl_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srl_epi32(__A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srl_epi64(__m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srl_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srl_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srlv_epi32(__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrlv16si((__v16si)__X, (__v16si)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srlv_epi32(__X, __Y), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srlv_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srlv_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_psrlv8di((__v8di)__X, (__v8di)__Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srlv_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srlv_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +/// \enum _MM_TERNLOG_ENUM +/// A helper to represent the ternary logic operations among vector \a A, +/// \a B and \a C. The representation is passed to \a imm. 
+typedef enum { + _MM_TERNLOG_A = 0xF0, + _MM_TERNLOG_B = 0xCC, + _MM_TERNLOG_C = 0xAA +} _MM_TERNLOG_ENUM; + +#define _mm512_ternarylogic_epi32(A, B, C, imm) \ + ((__m512i)__builtin_ia32_pternlogd512_mask( \ + (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \ + (unsigned char)(imm), (__mmask16)-1)) + +#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \ + ((__m512i)__builtin_ia32_pternlogd512_mask( \ + (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \ + (unsigned char)(imm), (__mmask16)(U))) + +#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \ + ((__m512i)__builtin_ia32_pternlogd512_maskz( \ + (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \ + (unsigned char)(imm), (__mmask16)(U))) + +#define _mm512_ternarylogic_epi64(A, B, C, imm) \ + ((__m512i)__builtin_ia32_pternlogq512_mask( \ + (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \ + (unsigned char)(imm), (__mmask8)-1)) + +#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \ + ((__m512i)__builtin_ia32_pternlogq512_mask( \ + (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \ + ((__m512i)__builtin_ia32_pternlogq512_maskz( \ + (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#ifdef __x86_64__ +#define _mm_cvt_roundsd_i64(A, R) \ + ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))) +#endif + +#define _mm_cvt_roundsd_si32(A, R) \ + ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))) + +#define _mm_cvt_roundsd_i32(A, R) \ + ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))) + +#define _mm_cvt_roundsd_u32(A, R) \ + ((unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R))) + +static __inline__ unsigned __DEFAULT_FN_ATTRS128 +_mm_cvtsd_u32 (__m128d __A) +{ + return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvt_roundsd_u64(A, R) \ + ((unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \ + (int)(R))) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvtsd_u64 (__m128d __A) +{ + return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvt_roundss_si32(A, R) \ + ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))) + +#define _mm_cvt_roundss_i32(A, R) \ + ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))) + +#ifdef __x86_64__ +#define _mm_cvt_roundss_si64(A, R) \ + ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))) + +#define _mm_cvt_roundss_i64(A, R) \ + ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))) +#endif + +#define _mm_cvt_roundss_u32(A, R) \ + ((unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R))) + +static __inline__ unsigned __DEFAULT_FN_ATTRS128 +_mm_cvtss_u32 (__m128 __A) +{ + return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvt_roundss_u64(A, R) \ + ((unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \ + (int)(R))) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvtss_u64 (__m128 __A) +{ + return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define 
_mm_cvtt_roundsd_i32(A, R) \ + ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))) + +#define _mm_cvtt_roundsd_si32(A, R) \ + ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))) + +static __inline__ int __DEFAULT_FN_ATTRS128 +_mm_cvttsd_i32 (__m128d __A) +{ + return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundsd_si64(A, R) \ + ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))) + +#define _mm_cvtt_roundsd_i64(A, R) \ + ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))) + +static __inline__ long long __DEFAULT_FN_ATTRS128 +_mm_cvttsd_i64 (__m128d __A) +{ + return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundsd_u32(A, R) \ + ((unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R))) + +static __inline__ unsigned __DEFAULT_FN_ATTRS128 +_mm_cvttsd_u32 (__m128d __A) +{ + return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundsd_u64(A, R) \ + ((unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \ + (int)(R))) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvttsd_u64 (__m128d __A) +{ + return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundss_i32(A, R) \ + ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))) + +#define _mm_cvtt_roundss_si32(A, R) \ + ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))) + +static __inline__ int __DEFAULT_FN_ATTRS128 +_mm_cvttss_i32 (__m128 __A) +{ + return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundss_i64(A, R) \ + ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))) + +#define _mm_cvtt_roundss_si64(A, R) \ + ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))) + +static __inline__ long long __DEFAULT_FN_ATTRS128 +_mm_cvttss_i64 (__m128 __A) +{ + return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundss_u32(A, R) \ + ((unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R))) + +static __inline__ unsigned __DEFAULT_FN_ATTRS128 +_mm_cvttss_u32 (__m128 __A) +{ + return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundss_u64(A, R) \ + ((unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \ + (int)(R))) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvttss_u64 (__m128 __A) +{ + return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf) + __A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm512_permute_pd(X, C) \ + ((__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C))) + +#define _mm512_mask_permute_pd(W, U, X, C) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permute_pd((X), (C)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_permute_pd(U, X, C) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permute_pd((X), (C)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_permute_ps(X, C) \ + ((__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C))) + +#define 
_mm512_mask_permute_ps(W, U, X, C) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_permute_ps((X), (C)), \ + (__v16sf)(__m512)(W))) + +#define _mm512_maskz_permute_ps(U, X, C) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_permute_ps((X), (C)), \ + (__v16sf)_mm512_setzero_ps())) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_permutevar_pd(__m512d __A, __m512i __C) +{ + return (__m512d)__builtin_ia32_vpermilvarpd512((__v8df)__A, (__v8di)__C); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutevar_pd(__A, __C), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutevar_pd(__A, __C), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_permutevar_ps(__m512 __A, __m512i __C) +{ + return (__m512)__builtin_ia32_vpermilvarps512((__v16sf)__A, (__v16si)__C); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutevar_ps(__A, __C), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutevar_ps(__A, __C), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline __m512d __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B) +{ + return (__m512d)__builtin_ia32_vpermi2varpd512((__v8df)__A, (__v8di)__I, + (__v8df)__B); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_pd(__m512d __A, __mmask8 __U, __m512i __I, __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_permutex2var_pd(__A, __I, __B), + (__v8df)__A); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_pd(__m512d __A, __m512i __I, __mmask8 __U, + __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_permutex2var_pd(__A, __I, __B), + (__v8df)(__m512d)__I); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_pd(__mmask8 __U, __m512d __A, __m512i __I, + __m512d __B) +{ + return (__m512d)__builtin_ia32_selectpd_512(__U, + (__v8df)_mm512_permutex2var_pd(__A, __I, __B), + (__v8df)_mm512_setzero_pd()); +} + +static __inline __m512 __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B) +{ + return (__m512)__builtin_ia32_vpermi2varps512((__v16sf)__A, (__v16si)__I, + (__v16sf) __B); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_permutex2var_ps(__m512 __A, __mmask16 __U, __m512i __I, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_permutex2var_ps(__A, __I, __B), + (__v16sf)__A); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask2_permutex2var_ps(__m512 __A, __m512i __I, __mmask16 __U, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_permutex2var_ps(__A, __I, __B), + (__v16sf)(__m512)__I); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutex2var_ps(__mmask16 
__U, __m512 __A, __m512i __I, __m512 __B) +{ + return (__m512)__builtin_ia32_selectps_512(__U, + (__v16sf)_mm512_permutex2var_ps(__A, __I, __B), + (__v16sf)_mm512_setzero_ps()); +} + + +#define _mm512_cvtt_roundpd_epu32(A, R) \ + ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_undefined_si256(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \ + ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)(__m256i)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \ + ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvttpd_epu32 (__m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_undefined_si256 (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A) +{ + return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_roundscale_round_sd(A, B, imm, R) \ + ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(imm), \ + (int)(R))) + +#define _mm_roundscale_sd(A, B, imm) \ + ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(imm), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_roundscale_sd(W, U, A, B, imm) \ + ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(imm), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \ + ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(I), \ + (int)(R))) + +#define _mm_maskz_roundscale_sd(U, A, B, I) \ + ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(I), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \ + ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(I), \ + (int)(R))) + +#define _mm_roundscale_round_ss(A, B, imm, R) \ + ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(imm), \ + (int)(R))) + +#define _mm_roundscale_ss(A, B, imm) \ + ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(imm), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_roundscale_ss(W, U, A, B, I) \ + ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(I), \ + 
_MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \ + ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(I), \ + (int)(R))) + +#define _mm_maskz_roundscale_ss(U, A, B, I) \ + ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(I), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \ + ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(I), \ + (int)(R))) + +#define _mm512_scalef_round_pd(A, B, R) \ + ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_scalef_round_pd(W, U, A, B, R) \ + ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_scalef_round_pd(U, A, B, R) \ + ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_scalef_pd (__m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B) +{ + return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_scalef_round_ps(A, B, R) \ + ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_scalef_round_ps(W, U, A, B, R) \ + ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_scalef_round_ps(U, A, B, R) \ + ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R))) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_scalef_ps (__m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B) +{ + return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + 
(__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_scalef_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_scalef_sd (__m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A, + (__v2df)( __B), (__v2df) _mm_setzero_pd(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_scalef_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_scalef_round_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +#define _mm_scalef_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_scalef_ss (__m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A, + (__v4sf)( __B), (__v4sf) _mm_setzero_ps(), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_scalef_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_scalef_round_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srai_epi32(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psradi512((__v16si)__A, (int)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, + unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srai_epi32(__A, __B), + (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, + unsigned int __B) { + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U, + (__v16si)_mm512_srai_epi32(__A, __B), + 
(__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_srai_epi64(__m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, (int)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srai_epi64(__A, __B), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U, + (__v8di)_mm512_srai_epi64(__A, __B), + (__v8di)_mm512_setzero_si512()); +} + +#define _mm512_shuffle_f32x4(A, B, imm) \ + ((__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(imm))) + +#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \ + (__v16sf)(__m512)(W))) + +#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \ + (__v16sf)_mm512_setzero_ps())) + +#define _mm512_shuffle_f64x2(A, B, imm) \ + ((__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(imm))) + +#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_shuffle_i32x4(A, B, imm) \ + ((__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(imm))) + +#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \ + (__v16si)(__m512i)(W))) + +#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512())) + +#define _mm512_shuffle_i64x2(A, B, imm) \ + ((__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(imm))) + +#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \ + (__v8di)(__m512i)(W))) + +#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \ + (__v8di)_mm512_setzero_si512())) + +#define _mm512_shuffle_pd(A, B, M) \ + ((__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), (int)(M))) + +#define _mm512_mask_shuffle_pd(W, U, A, B, M) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_pd((A), (B), (M)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_shuffle_pd(U, A, B, M) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_shuffle_pd((A), (B), (M)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_shuffle_ps(A, B, M) \ + ((__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), (int)(M))) + +#define _mm512_mask_shuffle_ps(W, U, A, B, M) \ + 
((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \ + (__v16sf)(__m512)(W))) + +#define _mm512_maskz_shuffle_ps(U, A, B, M) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \ + (__v16sf)_mm512_setzero_ps())) + +#define _mm_sqrt_round_sd(A, B, R) \ + ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_sqrt_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A, + (__v2df) __B, + (__v2df) _mm_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_sqrt_round_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +#define _mm_sqrt_round_ss(A, B, R) \ + ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_sqrt_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(W), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A, + (__v4sf) __B, + (__v4sf) _mm_setzero_ps (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_sqrt_round_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f32x4(__m128 __A) +{ + return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A, + 0, 1, 2, 3, 0, 1, 2, 3, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x4(__A), + (__v16sf)__O); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__M, + (__v16sf)_mm512_broadcast_f32x4(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_broadcast_f64x4(__m256d __A) +{ + return (__m512d)__builtin_shufflevector((__v4df)__A, (__v4df)__A, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 
+_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M, + (__v8df)_mm512_broadcast_f64x4(__A), + (__v8df)__O); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M, + (__v8df)_mm512_broadcast_f64x4(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i32x4(__m128i __A) +{ + return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A, + 0, 1, 2, 3, 0, 1, 2, 3, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x4(__A), + (__v16si)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_broadcast_i32x4(__A), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_broadcast_i64x4(__m256i __A) +{ + return (__m512i)__builtin_shufflevector((__v4di)__A, (__v4di)__A, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_broadcast_i64x4(__A), + (__v8di)__O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_broadcast_i64x4(__A), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512(__M, + (__v8df) _mm512_broadcastsd_pd(__A), + (__v8df) __O); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A) +{ + return (__m512d)__builtin_ia32_selectpd_512(__M, + (__v8df) _mm512_broadcastsd_pd(__A), + (__v8df) _mm512_setzero_pd()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__M, + (__v16sf) _mm512_broadcastss_ps(__A), + (__v16sf) __O); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A) +{ + return (__m512)__builtin_ia32_selectps_512(__M, + (__v16sf) _mm512_broadcastss_ps(__A), + (__v16sf) _mm512_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + 
__builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtsepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 
+_mm512_cvtusepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) __O, + __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtusepi64_epi16 (__m512i __A) +{ + return (__m128i) 
__builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_epi16 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) _mm256_undefined_si256 (), + (__mmask16) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A, + (__v16hi) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A) +{ + __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_epi8 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_epi32 (__m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) _mm256_undefined_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i 
__DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) __O, __M); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A) +{ + return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A, + (__v8si) _mm256_setzero_si256 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_cvtepi64_epi16 (__m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) _mm_undefined_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A) +{ + return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A) +{ + __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M); +} + +#define _mm512_extracti32x4_epi32(A, imm) \ + ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v4si)_mm_undefined_si128(), \ + (__mmask8)-1)) + +#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \ + ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v4si)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_extracti32x4_epi32(U, A, imm) \ + ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \ + (__v4si)_mm_setzero_si128(), \ + (__mmask8)(U))) + +#define _mm512_extracti64x4_epi64(A, imm) \ + ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \ + (__v4di)_mm256_undefined_si256(), \ + (__mmask8)-1)) + +#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \ + ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \ + (__v4di)(__m256i)(W), \ + (__mmask8)(U))) + +#define _mm512_maskz_extracti64x4_epi64(U, A, imm) \ + ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \ + (__v4di)_mm256_setzero_si256(), \ + (__mmask8)(U))) + +#define _mm512_insertf64x4(A, B, imm) \ + ((__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \ + (__v4df)(__m256d)(B), (int)(imm))) + +#define _mm512_mask_insertf64x4(W, U, A, B, imm) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x4((A), (B), (imm)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_insertf64x4(U, A, B, imm) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_insertf64x4((A), (B), (imm)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_inserti64x4(A, B, imm) \ + ((__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \ + (__v4di)(__m256i)(B), (int)(imm))) + +#define _mm512_mask_inserti64x4(W, U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x4((A), (B), (imm)), \ + (__v8di)(__m512i)(W))) + +#define _mm512_maskz_inserti64x4(U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_inserti64x4((A), (B), 
(imm)), \ + (__v8di)_mm512_setzero_si512())) + +#define _mm512_insertf32x4(A, B, imm) \ + ((__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \ + (__v4sf)(__m128)(B), (int)(imm))) + +#define _mm512_mask_insertf32x4(W, U, A, B, imm) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \ + (__v16sf)(__m512)(W))) + +#define _mm512_maskz_insertf32x4(U, A, B, imm) \ + ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \ + (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \ + (__v16sf)_mm512_setzero_ps())) + +#define _mm512_inserti32x4(A, B, imm) \ + ((__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \ + (__v4si)(__m128i)(B), (int)(imm))) + +#define _mm512_mask_inserti32x4(W, U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x4((A), (B), (imm)), \ + (__v16si)(__m512i)(W))) + +#define _mm512_maskz_inserti32x4(U, A, B, imm) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_inserti32x4((A), (B), (imm)), \ + (__v16si)_mm512_setzero_si512())) + +#define _mm512_getmant_round_pd(A, B, C, R) \ + ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \ + ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \ + ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_getmant_pd(A, B, C) \ + ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_getmant_pd(W, U, A, B, C) \ + ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_getmant_pd(U, A, B, C) \ + ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_getmant_round_ps(A, B, C, R) \ + ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \ + ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \ + ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2) | (B)), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_getmant_ps(A, B, C) \ + ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2)|(B)), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_getmant_ps(W, U, A, B, C) \ + ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2)|(B)), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_getmant_ps(U, A, B, C) \ + 
((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \ + (int)(((C)<<2)|(B)), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_getexp_round_pd(A, R) \ + ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_getexp_round_pd(W, U, A, R) \ + ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_getexp_round_pd(U, A, R) \ + ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_getexp_pd (__m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) _mm512_undefined_pd (), + (__mmask8) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_getexp_round_ps(A, R) \ + ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_getexp_round_ps(W, U, A, R) \ + ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_getexp_round_ps(U, A, R) \ + ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(U), (int)(R))) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_getexp_ps (__m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) _mm512_undefined_ps (), + (__mmask16) -1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A, + (__v16sf) _mm512_setzero_ps (), + (__mmask16) __U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_i64gather_ps(index, addr, scale) \ + ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), (__mmask8)-1, \ + (int)(scale))) + +#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \ + ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\ + (void const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm512_i64gather_epi32(index, addr, scale) \ + ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)-1, (int)(scale))) + +#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \ + ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \ + (void const *)(addr), \ + 
(__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm512_i64gather_pd(index, addr, scale) \ + ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), (__mmask8)-1, \ + (int)(scale))) + +#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \ + ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm512_i64gather_epi64(index, addr, scale) \ + ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), (__mmask8)-1, \ + (int)(scale))) + +#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \ + ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \ + (void const *)(addr), \ + (__v8di)(__m512i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm512_i32gather_ps(index, addr, scale) \ + ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \ + (void const *)(addr), \ + (__v16si)(__m512)(index), \ + (__mmask16)-1, (int)(scale))) + +#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \ + ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \ + (void const *)(addr), \ + (__v16si)(__m512)(index), \ + (__mmask16)(mask), (int)(scale))) + +#define _mm512_i32gather_epi32(index, addr, scale) \ + ((__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \ + (void const *)(addr), \ + (__v16si)(__m512i)(index), \ + (__mmask16)-1, (int)(scale))) + +#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \ + ((__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \ + (void const *)(addr), \ + (__v16si)(__m512i)(index), \ + (__mmask16)(mask), (int)(scale))) + +#define _mm512_i32gather_pd(index, addr, scale) \ + ((__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), (__mmask8)-1, \ + (int)(scale))) + +#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \ + ((__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm512_i32gather_epi64(index, addr, scale) \ + ((__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), (__mmask8)-1, \ + (int)(scale))) + +#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \ + ((__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm512_i64scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8sf)(__m256)(v1), (int)(scale)) + +#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8sf)(__m256)(v1), (int)(scale)) + +#define _mm512_i64scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)) + +#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8si)(__m256i)(v1), 
(int)(scale)) + +#define _mm512_i64scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)) + +#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)) + +#define _mm512_i64scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)-1, \ + (__v8di)(__m512i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)) + +#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)(mask), \ + (__v8di)(__m512i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)) + +#define _mm512_i32scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)-1, \ + (__v16si)(__m512i)(index), \ + (__v16sf)(__m512)(v1), (int)(scale)) + +#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)(mask), \ + (__v16si)(__m512i)(index), \ + (__v16sf)(__m512)(v1), (int)(scale)) + +#define _mm512_i32scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)-1, \ + (__v16si)(__m512i)(index), \ + (__v16si)(__m512i)(v1), (int)(scale)) + +#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)(mask), \ + (__v16si)(__m512i)(index), \ + (__v16si)(__m512i)(v1), (int)(scale)) + +#define _mm512_i32scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)) + +#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), \ + (__v8df)(__m512d)(v1), (int)(scale)) + +#define _mm512_i32scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)) + +#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), \ + (__v8di)(__m512i)(v1), (int)(scale)) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_vfmaddss3_mask((__v4sf)__W, + (__v4sf)__A, + (__v4sf)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmadd_round_ss(A, B, C, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)-1, \ + (int)(R))) + +#define _mm_mask_fmadd_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A, + (__v4sf)__B, + (__v4sf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \ + ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 
+_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W, + (__v4sf)__X, + (__v4sf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_vfmaddss3_mask((__v4sf)__W, + (__v4sf)__A, + -(__v4sf)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmsub_round_ss(A, B, C, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + -(__v4sf)(__m128)(C), (__mmask8)-1, \ + (int)(R))) + +#define _mm_mask_fmsub_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A, + (__v4sf)__B, + -(__v4sf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \ + ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + -(__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W, + (__v4sf)__X, + (__v4sf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \ + ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \ + (__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_vfmaddss3_mask((__v4sf)__W, + -(__v4sf)__A, + (__v4sf)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmadd_round_ss(A, B, C, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)-1, \ + (int)(R))) + +#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A, + -(__v4sf)__B, + (__v4sf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \ + ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W, + -(__v4sf)__X, + (__v4sf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 
+_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_vfmaddss3_mask((__v4sf)__W, + -(__v4sf)__A, + -(__v4sf)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmsub_round_ss(A, B, C, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), \ + -(__v4sf)(__m128)(C), (__mmask8)-1, \ + (int)(R))) + +#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A, + -(__v4sf)__B, + -(__v4sf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \ + ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \ + -(__v4sf)(__m128)(B), \ + -(__v4sf)(__m128)(C), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W, + -(__v4sf)__X, + (__v4sf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \ + ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \ + -(__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_vfmaddsd3_mask((__v2df)__W, + (__v2df)__A, + (__v2df)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmadd_round_sd(A, B, C, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)-1, \ + (int)(R))) + +#define _mm_mask_fmadd_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A, + (__v2df)__B, + (__v2df)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W, + (__v2df)__X, + (__v2df)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_vfmaddsd3_mask((__v2df)__W, + (__v2df)__A, + -(__v2df)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmsub_round_sd(A, B, C, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), (__mmask8)-1, \ + (int)(R))) + +#define 
_mm_mask_fmsub_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A, + (__v2df)__B, + -(__v2df)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W, + (__v2df)__X, + (__v2df)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \ + ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \ + (__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_vfmaddsd3_mask((__v2df)__W, + -(__v2df)__A, + (__v2df)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmadd_round_sd(A, B, C, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)-1, \ + (int)(R))) + +#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A, + -(__v2df)__B, + (__v2df)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(C), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W, + -(__v2df)__X, + (__v2df)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_vfmaddsd3_mask((__v2df)__W, + -(__v2df)__A, + -(__v2df)__B, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmsub_round_sd(A, B, C, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), (__mmask8)-1, \ + (int)(R))) + +#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A, + -(__v2df)__B, + 
-(__v2df)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \ + ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \ + -(__v2df)(__m128d)(B), \ + -(__v2df)(__m128d)(C), \ + (__mmask8)(U), \ + (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U) +{ + return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W, + -(__v2df)__X, + (__v2df)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \ + ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \ + -(__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_permutex_pd(X, C) \ + ((__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C))) + +#define _mm512_mask_permutex_pd(W, U, X, C) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permutex_pd((X), (C)), \ + (__v8df)(__m512d)(W))) + +#define _mm512_maskz_permutex_pd(U, X, C) \ + ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \ + (__v8df)_mm512_permutex_pd((X), (C)), \ + (__v8df)_mm512_setzero_pd())) + +#define _mm512_permutex_epi64(X, C) \ + ((__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C))) + +#define _mm512_mask_permutex_epi64(W, U, X, C) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_permutex_epi64((X), (C)), \ + (__v8di)(__m512i)(W))) + +#define _mm512_maskz_permutex_epi64(U, X, C) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_permutex_epi64((X), (C)), \ + (__v8di)_mm512_setzero_si512())) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_pd (__m512i __X, __m512d __Y) +{ + return (__m512d)__builtin_ia32_permvardf512((__v8df) __Y, (__v8di) __X); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutexvar_pd(__X, __Y), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_permutexvar_pd(__X, __Y), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_permvardi512((__v8di)__Y, (__v8di)__X); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_permutexvar_epi64(__X, __Y), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M, + (__v8di)_mm512_permutexvar_epi64(__X, __Y), + (__v8di)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_ps (__m512i __X, __m512 __Y) +{ + return (__m512)__builtin_ia32_permvarsf512((__v16sf)__Y, (__v16si)__X); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutexvar_ps(__X, __Y), + (__v16sf)__W); +} + +static __inline__ 
__m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_permutexvar_ps(__X, __Y), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_permvarsi512((__v16si)__Y, (__v16si)__X); +} + +#define _mm512_permutevar_epi32 _mm512_permutexvar_epi32 + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_permutexvar_epi32(__X, __Y), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M, + (__v16si)_mm512_permutexvar_epi32(__X, __Y), + (__v16si)__W); +} + +#define _mm512_mask_permutevar_epi32 _mm512_mask_permutexvar_epi32 + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kand (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kandn (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_kortestc (__mmask16 __A, __mmask16 __B) +{ + return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ int __DEFAULT_FN_ATTRS +_mm512_kortestz (__mmask16 __A, __mmask16 __B) +{ + return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestc_mask16_u8(__mmask16 __A, __mmask16 __B) +{ + return (unsigned char)__builtin_ia32_kortestchi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortestz_mask16_u8(__mmask16 __A, __mmask16 __B) +{ + return (unsigned char)__builtin_ia32_kortestzhi(__A, __B); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_kortest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) { + *__C = (unsigned char)__builtin_ia32_kortestchi(__A, __B); + return (unsigned char)__builtin_ia32_kortestzhi(__A, __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kunpackb (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kxnor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_mm512_kxor (__mmask16 __A, __mmask16 __B) +{ + return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B); +} + +#define _kand_mask16 _mm512_kand +#define _kandn_mask16 _mm512_kandn +#define _knot_mask16 _mm512_knot +#define _kor_mask16 _mm512_kor +#define _kxnor_mask16 _mm512_kxnor +#define _kxor_mask16 _mm512_kxor + +#define _kshiftli_mask16(A, I) \ + ((__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I))) + +#define _kshiftri_mask16(A, I) \ + ((__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I))) + 
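+
+/* Editor's note: the block below is an illustrative usage sketch, not part of
+ * the upstream LLVM header. It shows how the 16-bit mask intrinsics defined
+ * above compose; the helper name any_lane_in_both is hypothetical, and the
+ * sketch is wrapped in #if 0 so it never affects compilation. */
+#if 0
+static inline int any_lane_in_both(__mmask16 a, __mmask16 b) {
+  /* Intersect the two lane masks. */
+  __mmask16 both = _mm512_kand(a, b);
+  /* _mm512_kortestz(x, y) returns 1 only when (x | y) == 0, so negating it
+   * reports whether at least one lane is set in both masks. */
+  return !_mm512_kortestz(both, both);
+}
+#endif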
+static __inline__ unsigned int __DEFAULT_FN_ATTRS +_cvtmask16_u32(__mmask16 __A) { + return (unsigned int)__builtin_ia32_kmovw((__mmask16)__A); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_cvtu32_mask16(unsigned int __A) { + return (__mmask16)__builtin_ia32_kmovw((__mmask16)__A); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS +_load_mask16(__mmask16 *__A) { + return (__mmask16)__builtin_ia32_kmovw(*(__mmask16 *)__A); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_store_mask16(__mmask16 *__A, __mmask16 __B) { + *(__mmask16 *)__A = __builtin_ia32_kmovw((__mmask16)__B); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_stream_si512 (void * __P, __m512i __A) +{ + typedef __v8di __v8di_aligned __attribute__((aligned(64))); + __builtin_nontemporal_store((__v8di_aligned)__A, (__v8di_aligned*)__P); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_stream_load_si512 (void const *__P) +{ + typedef __v8di __v8di_aligned __attribute__((aligned(64))); + return (__m512i) __builtin_nontemporal_load((const __v8di_aligned *)__P); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_stream_pd (void *__P, __m512d __A) +{ + typedef __v8df __v8df_aligned __attribute__((aligned(64))); + __builtin_nontemporal_store((__v8df_aligned)__A, (__v8df_aligned*)__P); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_stream_ps (void *__P, __m512 __A) +{ + typedef __v16sf __v16sf_aligned __attribute__((aligned(64))); + __builtin_nontemporal_store((__v16sf_aligned)__A, (__v16sf_aligned*)__P); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +#define _mm_cmp_round_ss_mask(X, Y, P, R) \ + ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + 
(__mmask8)-1, (int)(R))) + +#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \ + ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)(M), (int)(R))) + +#define _mm_cmp_ss_mask(X, Y, P) \ + ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_cmp_ss_mask(M, X, Y, P) \ + ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \ + (__v4sf)(__m128)(Y), (int)(P), \ + (__mmask8)(M), \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_cmp_round_sd_mask(X, Y, P, R) \ + ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \ + ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)(M), (int)(R))) + +#define _mm_cmp_sd_mask(X, Y, P) \ + ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_cmp_sd_mask(M, X, Y, P) \ + ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), (int)(P), \ + (__mmask8)(M), \ + _MM_FROUND_CUR_DIRECTION)) + +/* Bit Test */ + +static __inline __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_test_epi32_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpneq_epi32_mask (_mm512_and_epi32(__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpneq_epi32_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_test_epi64_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpneq_epi64_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpneq_epi64_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_testn_epi32_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpeq_epi32_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS512 +_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpeq_epi32_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_testn_epi64_mask (__m512i __A, __m512i __B) +{ + return _mm512_cmpeq_epi64_mask (_mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS512 +_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_cmpeq_epi64_mask (__U, _mm512_and_epi32 (__A, __B), + _mm512_setzero_si512()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_movehdup_ps (__m512 __A) +{ + return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A, + 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_movehdup_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 
+_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_movehdup_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_moveldup_ps (__m512 __A) +{ + return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A, + 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_moveldup_ps(__A), + (__v16sf)__W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A) +{ + return (__m512)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_moveldup_ps(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B), __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B) +{ + return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B), + _mm_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B), __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B) +{ + return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B), + _mm_setzero_pd()); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storess128_mask ((__v4sf *)__W, __A, __U & 1); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storesd128_mask ((__v2df *)__W, __A, __U & 1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float* __A) +{ + __m128 src = (__v4sf) __builtin_shufflevector((__v4sf) __W, + (__v4sf)_mm_setzero_ps(), + 0, 4, 4, 4); + + return (__m128) __builtin_ia32_loadss128_mask ((const __v4sf *) __A, src, __U & 1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_load_ss (__mmask8 __U, const float* __A) +{ + return (__m128)__builtin_ia32_loadss128_mask ((const __v4sf *) __A, + (__v4sf) _mm_setzero_ps(), + __U & 1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double* __A) +{ + __m128d src = (__v2df) __builtin_shufflevector((__v2df) __W, + (__v2df)_mm_setzero_pd(), + 0, 2); + + return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A, src, __U & 1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_load_sd (__mmask8 __U, const double* __A) +{ + return (__m128d) __builtin_ia32_loadsd128_mask ((const __v2df *) __A, + (__v2df) _mm_setzero_pd(), + __U & 1); +} + +#define _mm512_shuffle_epi32(A, I) \ + ((__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I))) + +#define _mm512_mask_shuffle_epi32(W, U, A, I) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_epi32((A), (I)), \ + (__v16si)(__m512i)(W))) + +#define _mm512_maskz_shuffle_epi32(U, A, I) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shuffle_epi32((A), (I)), \ + 
(__v16si)_mm512_setzero_si512())) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A, + (__v8df) _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A, + (__v8di) _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P, + (__v8df) _mm512_setzero_pd(), + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P, + (__v8di) _mm512_setzero_si512(), + (__mmask8) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P, + (__v16sf) _mm512_setzero_ps(), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P, + (__v16si) _mm512_setzero_si512(), + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A, + (__v16sf) _mm512_setzero_ps(), + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A, + 
(__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A) +{ + return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A, + (__v16si) _mm512_setzero_si512(), + (__mmask16) __U); +} + +#define _mm512_cvt_roundps_pd(A, R) \ + ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \ + (__v8df)_mm512_undefined_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm512_mask_cvt_roundps_pd(W, U, A, R) \ + ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \ + (__v8df)(__m512d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundps_pd(U, A, R) \ + ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtps_pd (__m256 __A) +{ + return (__m512d) __builtin_convertvector((__v8sf)__A, __v8df); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtps_pd(__A), + (__v8df)__W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A) +{ + return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U, + (__v8df)_mm512_cvtps_pd(__A), + (__v8df)_mm512_setzero_pd()); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_cvtpslo_pd (__m512 __A) +{ + return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A) +{ + return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A)); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U, + (__v8df) __A, + (__v8df) __W); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A) +{ + return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U, + (__v8df) __A, + (__v8df) _mm512_setzero_pd ()); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U, + (__v16sf) __A, + (__v16sf) __W); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A) +{ + return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U, + (__v16sf) __A, + (__v16sf) _mm512_setzero_ps ()); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A) +{ + __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A) +{ + __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A, + (__mmask16) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 +_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A) +{ + __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A, + (__mmask16) __U); +} + +#define _mm_cvt_roundsd_ss(A, B, R) \ + 
((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \ + (__v2df)(__m128d)(B), \ + (__v4sf)_mm_undefined_ps(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \ + (__v2df)(__m128d)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \ + (__v2df)(__m128d)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B) +{ + return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A, + (__v2df)__B, + (__v4sf)__W, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B) +{ + return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A, + (__v2df)__B, + (__v4sf)_mm_setzero_ps(), + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_cvtss_i32 _mm_cvtss_si32 +#define _mm_cvtsd_i32 _mm_cvtsd_si32 +#define _mm_cvti32_sd _mm_cvtsi32_sd +#define _mm_cvti32_ss _mm_cvtsi32_ss +#ifdef __x86_64__ +#define _mm_cvtss_i64 _mm_cvtss_si64 +#define _mm_cvtsd_i64 _mm_cvtsd_si64 +#define _mm_cvti64_sd _mm_cvtsi64_sd +#define _mm_cvti64_ss _mm_cvtsi64_ss +#endif + +#ifdef __x86_64__ +#define _mm_cvt_roundi64_sd(A, B, R) \ + ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \ + (int)(R))) + +#define _mm_cvt_roundsi64_sd(A, B, R) \ + ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \ + (int)(R))) +#endif + +#define _mm_cvt_roundsi32_ss(A, B, R) \ + ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))) + +#define _mm_cvt_roundi32_ss(A, B, R) \ + ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))) + +#ifdef __x86_64__ +#define _mm_cvt_roundsi64_ss(A, B, R) \ + ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \ + (int)(R))) + +#define _mm_cvt_roundi64_ss(A, B, R) \ + ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \ + (int)(R))) +#endif + +#define _mm_cvt_roundss_sd(A, B, R) \ + ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \ + (__v4sf)(__m128)(B), \ + (__v2df)_mm_undefined_pd(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \ + (__v4sf)(__m128)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_cvt_roundss_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \ + (__v4sf)(__m128)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B) +{ + return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A, + (__v4sf)__B, + (__v2df)__W, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B) +{ + return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A, + (__v4sf)__B, + (__v2df)_mm_setzero_pd(), + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtu32_sd (__m128d __A, unsigned __B) +{ + __A[0] = __B; + return __A; +} + +#ifdef __x86_64__ +#define _mm_cvt_roundu64_sd(A, B, R) \ + 
((__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \ + (unsigned long long)(B), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtu64_sd (__m128d __A, unsigned long long __B) +{ + __A[0] = __B; + return __A; +} +#endif + +#define _mm_cvt_roundu32_ss(A, B, R) \ + ((__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \ + (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtu32_ss (__m128 __A, unsigned __B) +{ + __A[0] = __B; + return __A; +} + +#ifdef __x86_64__ +#define _mm_cvt_roundu64_ss(A, B, R) \ + ((__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \ + (unsigned long long)(B), (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtu64_ss (__m128 __A, unsigned long long __B) +{ + __A[0] = __B; + return __A; +} +#endif + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A) +{ + return (__m512i) __builtin_ia32_selectd_512(__M, + (__v16si) _mm512_set1_epi32(__A), + (__v16si) __O); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A) +{ + return (__m512i) __builtin_ia32_selectq_512(__M, + (__v8di) _mm512_set1_epi64(__A), + (__v8di) __O); +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59, + char __e58, char __e57, char __e56, char __e55, char __e54, char __e53, + char __e52, char __e51, char __e50, char __e49, char __e48, char __e47, + char __e46, char __e45, char __e44, char __e43, char __e42, char __e41, + char __e40, char __e39, char __e38, char __e37, char __e36, char __e35, + char __e34, char __e33, char __e32, char __e31, char __e30, char __e29, + char __e28, char __e27, char __e26, char __e25, char __e24, char __e23, + char __e22, char __e21, char __e20, char __e19, char __e18, char __e17, + char __e16, char __e15, char __e14, char __e13, char __e12, char __e11, + char __e10, char __e9, char __e8, char __e7, char __e6, char __e5, + char __e4, char __e3, char __e2, char __e1, char __e0) { + + return __extension__ (__m512i)(__v64qi) + {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7, + __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15, + __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23, + __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31, + __e32, __e33, __e34, __e35, __e36, __e37, __e38, __e39, + __e40, __e41, __e42, __e43, __e44, __e45, __e46, __e47, + __e48, __e49, __e50, __e51, __e52, __e53, __e54, __e55, + __e56, __e57, __e58, __e59, __e60, __e61, __e62, __e63}; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set_epi16(short __e31, short __e30, short __e29, short __e28, + short __e27, short __e26, short __e25, short __e24, short __e23, + short __e22, short __e21, short __e20, short __e19, short __e18, + short __e17, short __e16, short __e15, short __e14, short __e13, + short __e12, short __e11, short __e10, short __e9, short __e8, + short __e7, short __e6, short __e5, short __e4, short __e3, + short __e2, short __e1, short __e0) { + return __extension__ (__m512i)(__v32hi) + {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7, + __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15, + __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23, + __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31 }; +} + +static __inline __m512i __DEFAULT_FN_ATTRS512 +_mm512_set_epi32 (int __A, int __B, int __C, int __D, + int __E, int __F, int __G, int __H, + int __I, int __J, int __K, int __L, 
+ int __M, int __N, int __O, int __P) +{ + return __extension__ (__m512i)(__v16si) + { __P, __O, __N, __M, __L, __K, __J, __I, + __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7, \ + e8,e9,e10,e11,e12,e13,e14,e15) \ + _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \ + (e5),(e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_set_epi64 (long long __A, long long __B, long long __C, + long long __D, long long __E, long long __F, + long long __G, long long __H) +{ + return __extension__ (__m512i) (__v8di) + { __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7) \ + _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_set_pd (double __A, double __B, double __C, double __D, + double __E, double __F, double __G, double __H) +{ + return __extension__ (__m512d) + { __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7) \ + _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_set_ps (float __A, float __B, float __C, float __D, + float __E, float __F, float __G, float __H, + float __I, float __J, float __K, float __L, + float __M, float __N, float __O, float __P) +{ + return __extension__ (__m512) + { __P, __O, __N, __M, __L, __K, __J, __I, + __H, __G, __F, __E, __D, __C, __B, __A }; +} + +#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \ + _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \ + (e4),(e3),(e2),(e1),(e0)) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_abs_ps(__m512 __A) +{ + return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ; +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A) +{ + return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ; +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_abs_pd(__m512d __A) +{ + return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A) ; +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A) +{ + return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A); +} + +/* Vector-reduction arithmetic accepts vectors as inputs and produces scalars as + * outputs. This class of vector operation forms the basis of many scientific + * computations. In vector-reduction arithmetic, the evaluation order is + * independent of the order of the input elements of V. + + * For floating-point intrinsics: + * 1. When using fadd/fmul intrinsics, the order of operations within the + * vector is unspecified (associative math). + * 2. When using fmin/fmax intrinsics, NaN or -0.0 elements within the vector + * produce unspecified results. + + * Used bisection method. At each step, we partition the vector with previous + * step in half, and the operation is performed on its two halves. + * This takes log2(n) steps where n is the number of elements in the vector. 
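+ *
+ * Editor's note (illustration, not from the upstream header): for an
+ * 8-element fadd reduction the bisection proceeds as
+ *   step 1: {v0+v4, v1+v5, v2+v6, v3+v7}
+ *   step 2: {(v0+v4)+(v2+v6), (v1+v5)+(v3+v7)}
+ *   step 3: ((v0+v4)+(v2+v6)) + ((v1+v5)+(v3+v7))
+ * i.e. log2(8) = 3 steps, which is also why the result can differ from a
+ * strict left-to-right summation in floating point.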
+ */ + +static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) { + return __builtin_reduce_add((__v8di)__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) { + return __builtin_reduce_mul((__v8di)__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) { + return __builtin_reduce_and((__v8di)__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) { + return __builtin_reduce_or((__v8di)__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) { + __W = _mm512_maskz_mov_epi64(__M, __W); + return __builtin_reduce_add((__v8di)__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) { + __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W); + return __builtin_reduce_mul((__v8di)__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) { + __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __W); + return __builtin_reduce_and((__v8di)__W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) { + __W = _mm512_maskz_mov_epi64(__M, __W); + return __builtin_reduce_or((__v8di)__W); +} + +// -0.0 is used to ignore the start value since it is the neutral value of +// floating point addition. For more information, please refer to +// https://llvm.org/docs/LangRef.html#llvm-vector-reduce-fadd-intrinsic +static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) { + return __builtin_ia32_reduce_fadd_pd512(-0.0, __W); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W) { + return __builtin_ia32_reduce_fmul_pd512(1.0, __W); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) { + __W = _mm512_maskz_mov_pd(__M, __W); + return __builtin_ia32_reduce_fadd_pd512(-0.0, __W); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) { + __W = _mm512_mask_mov_pd(_mm512_set1_pd(1.0), __M, __W); + return __builtin_ia32_reduce_fmul_pd512(1.0, __W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_add_epi32(__m512i __W) { + return __builtin_reduce_add((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_mul_epi32(__m512i __W) { + return __builtin_reduce_mul((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_and_epi32(__m512i __W) { + return __builtin_reduce_and((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_or_epi32(__m512i __W) { + return __builtin_reduce_or((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) { + __W = _mm512_maskz_mov_epi32(__M, __W); + return __builtin_reduce_add((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) { + __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W); + return __builtin_reduce_mul((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) { + __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __W); + return __builtin_reduce_and((__v16si)__W); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 
+_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) { + __W = _mm512_maskz_mov_epi32(__M, __W); + return __builtin_reduce_or((__v16si)__W); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_reduce_add_ps(__m512 __W) { + return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_reduce_mul_ps(__m512 __W) { + return __builtin_ia32_reduce_fmul_ps512(1.0f, __W); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) { + __W = _mm512_maskz_mov_ps(__M, __W); + return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) { + __W = _mm512_mask_mov_ps(_mm512_set1_ps(1.0f), __M, __W); + return __builtin_ia32_reduce_fmul_ps512(1.0f, __W); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_epi64(__m512i __V) { + return __builtin_reduce_max((__v8di)__V); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_epu64(__m512i __V) { + return __builtin_reduce_max((__v8du)__V); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_epi64(__m512i __V) { + return __builtin_reduce_min((__v8di)__V); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_epu64(__m512i __V) { + return __builtin_reduce_min((__v8du)__V); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) { + __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V); + return __builtin_reduce_max((__v8di)__V); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) { + __V = _mm512_maskz_mov_epi64(__M, __V); + return __builtin_reduce_max((__v8du)__V); +} + +static __inline__ long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) { + __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V); + return __builtin_reduce_min((__v8di)__V); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) { + __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V); + return __builtin_reduce_min((__v8du)__V); +} +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_epi32(__m512i __V) { + return __builtin_reduce_max((__v16si)__V); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_epu32(__m512i __V) { + return __builtin_reduce_max((__v16su)__V); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_epi32(__m512i __V) { + return __builtin_reduce_min((__v16si)__V); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_epu32(__m512i __V) { + return __builtin_reduce_min((__v16su)__V); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) { + __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V); + return __builtin_reduce_max((__v16si)__V); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) { + __V = _mm512_maskz_mov_epi32(__M, __V); + return __builtin_reduce_max((__v16su)__V); +} + +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) { + __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V); + return 
__builtin_reduce_min((__v16si)__V); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) { + __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __V); + return __builtin_reduce_min((__v16su)__V); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_pd(__m512d __V) { + return __builtin_ia32_reduce_fmax_pd512(__V); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_pd(__m512d __V) { + return __builtin_ia32_reduce_fmin_pd512(__V); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) { + __V = _mm512_mask_mov_pd(_mm512_set1_pd(-__builtin_inf()), __M, __V); + return __builtin_ia32_reduce_fmax_pd512(__V); +} + +static __inline__ double __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) { + __V = _mm512_mask_mov_pd(_mm512_set1_pd(__builtin_inf()), __M, __V); + return __builtin_ia32_reduce_fmin_pd512(__V); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_ps(__m512 __V) { + return __builtin_ia32_reduce_fmax_ps512(__V); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_ps(__m512 __V) { + return __builtin_ia32_reduce_fmin_ps512(__V); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) { + __V = _mm512_mask_mov_ps(_mm512_set1_ps(-__builtin_inff()), __M, __V); + return __builtin_ia32_reduce_fmax_ps512(__V); +} + +static __inline__ float __DEFAULT_FN_ATTRS512 +_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) { + __V = _mm512_mask_mov_ps(_mm512_set1_ps(__builtin_inff()), __M, __V); + return __builtin_ia32_reduce_fmin_ps512(__V); +} + +/// Moves the least significant 32 bits of a vector of [16 x i32] to a +/// 32-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __A +/// A vector of [16 x i32]. The least significant 32 bits are moved to the +/// destination. +/// \returns A 32-bit signed integer containing the moved value. +static __inline__ int __DEFAULT_FN_ATTRS512 +_mm512_cvtsi512_si32(__m512i __A) { + __v16si __b = (__v16si)__A; + return __b[0]; +} + +/// Loads 8 double-precision (64-bit) floating-point elements stored at memory +/// locations starting at location \a base_addr at packed 32-bit integer indices +/// stored in the lower half of \a vindex scaled by \a scale them in dst. +/// +/// This intrinsic corresponds to the VGATHERDPD instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// dst[i+63:i] := MEM[addr+63:addr] +/// ENDFOR +/// dst[MAX:512] := 0 +/// \endcode +#define _mm512_i32logather_pd(vindex, base_addr, scale) \ + _mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale)) + +/// Loads 8 double-precision (64-bit) floating-point elements from memory +/// starting at location \a base_addr at packed 32-bit integer indices stored in +/// the lower half of \a vindex scaled by \a scale into dst using writemask +/// \a mask (elements are copied from \a src when the corresponding mask bit is +/// not set). +/// +/// This intrinsic corresponds to the VGATHERDPD instructions. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// IF mask[j] +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// dst[i+63:i] := MEM[addr+63:addr] +/// ELSE +/// dst[i+63:i] := src[i+63:i] +/// FI +/// ENDFOR +/// dst[MAX:512] := 0 +/// \endcode +#define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale) \ + _mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex), \ + (base_addr), (scale)) + +/// Loads 8 64-bit integer elements from memory starting at location \a base_addr +/// at packed 32-bit integer indices stored in the lower half of \a vindex +/// scaled by \a scale and stores them in dst. +/// +/// This intrinsic corresponds to the VPGATHERDQ instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// dst[i+63:i] := MEM[addr+63:addr] +/// ENDFOR +/// dst[MAX:512] := 0 +/// \endcode +#define _mm512_i32logather_epi64(vindex, base_addr, scale) \ + _mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale)) + +/// Loads 8 64-bit integer elements from memory starting at location \a base_addr +/// at packed 32-bit integer indices stored in the lower half of \a vindex +/// scaled by \a scale and stores them in dst using writemask \a mask (elements +/// are copied from \a src when the corresponding mask bit is not set). +/// +/// This intrinsic corresponds to the VPGATHERDQ instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// IF mask[j] +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// dst[i+63:i] := MEM[addr+63:addr] +/// ELSE +/// dst[i+63:i] := src[i+63:i] +/// FI +/// ENDFOR +/// dst[MAX:512] := 0 +/// \endcode +#define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale) \ + _mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex), \ + (base_addr), (scale)) + +/// Stores 8 packed double-precision (64-bit) floating-point elements in \a v1 +/// and to memory locations starting at location \a base_addr at packed 32-bit +/// integer indices stored in \a vindex scaled by \a scale. +/// +/// This intrinsic corresponds to the VSCATTERDPD instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// MEM[addr+63:addr] := v1[i+63:i] +/// ENDFOR +/// \endcode +#define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale) \ + _mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale)) + +/// Stores 8 packed double-precision (64-bit) floating-point elements in \a v1 +/// to memory locations starting at location \a base_addr at packed 32-bit +/// integer indices stored in \a vindex scaled by \a scale. Only those elements +/// whose corresponding mask bit is set in writemask \a mask are written to +/// memory. +/// +/// This intrinsic corresponds to the VSCATTERDPD instructions. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// IF mask[j] +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// MEM[addr+63:addr] := a[i+63:i] +/// FI +/// ENDFOR +/// \endcode +#define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale) \ + _mm512_mask_i32scatter_pd((base_addr), (mask), \ + _mm512_castsi512_si256(vindex), (v1), (scale)) + +/// Stores 8 packed 64-bit integer elements located in \a v1 and stores them in +/// memory locations starting at location \a base_addr at packed 32-bit integer +/// indices stored in \a vindex scaled by \a scale. +/// +/// This intrinsic corresponds to the VPSCATTERDQ instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// MEM[addr+63:addr] := a[i+63:i] +/// ENDFOR +/// \endcode +#define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale) \ + _mm512_i32scatter_epi64((base_addr), \ + _mm512_castsi512_si256(vindex), (v1), (scale)) + +/// Stores 8 packed 64-bit integer elements located in a and stores them in +/// memory locations starting at location \a base_addr at packed 32-bit integer +/// indices stored in \a vindex scaled by scale using writemask \a mask (elements +/// whose corresponding mask bit is not set are not written to memory). +/// +/// This intrinsic corresponds to the VPSCATTERDQ instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// i := j*64 +/// m := j*32 +/// IF mask[j] +/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8 +/// MEM[addr+63:addr] := a[i+63:i] +/// FI +/// ENDFOR +/// \endcode +#define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale) \ + _mm512_mask_i32scatter_epi64((base_addr), (mask), \ + _mm512_castsi512_si256(vindex), (v1), (scale)) + +#undef __DEFAULT_FN_ATTRS512 +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS + +#endif /* __AVX512FINTRIN_H */ diff --git a/third_party/intel/clang/avx512fp16intrin.h b/third_party/intel/clang/avx512fp16intrin.h new file mode 100644 index 000000000..e136aa14a --- /dev/null +++ b/third_party/intel/clang/avx512fp16intrin.h @@ -0,0 +1,3352 @@ +/*===----------- avx512fp16intrin.h - AVX512-FP16 intrinsics ---------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifdef __SSE2__ + +#ifndef __AVX512FP16INTRIN_H +#define __AVX512FP16INTRIN_H + +/* Define the default attributes for the functions in this file. */ +typedef _Float16 __v32hf __attribute__((__vector_size__(64), __aligned__(64))); +typedef _Float16 __m512h __attribute__((__vector_size__(64), __aligned__(64))); +typedef _Float16 __m512h_u __attribute__((__vector_size__(64), __aligned__(1))); + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS512 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512fp16,evex512"), __min_vector_width__(512))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512fp16,no-evex512"), \ + __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512fp16,no-evex512"), \ + __min_vector_width__(128))) + +static __inline__ _Float16 __DEFAULT_FN_ATTRS512 _mm512_cvtsh_h(__m512h __a) { + return __a[0]; +} + +static __inline __m128h __DEFAULT_FN_ATTRS128 _mm_setzero_ph(void) { + return (__m128h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; +} + +static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_setzero_ph(void) { + return (__m256h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_undefined_ph(void) { + return (__m256h)__builtin_ia32_undef256(); +} + +static __inline __m512h __DEFAULT_FN_ATTRS512 _mm512_setzero_ph(void) { + return (__m512h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_undefined_ph(void) { + return (__m128h)__builtin_ia32_undef128(); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_undefined_ph(void) { + return (__m512h)__builtin_ia32_undef512(); +} + +static __inline __m512h __DEFAULT_FN_ATTRS512 _mm512_set1_ph(_Float16 __h) { + return (__m512h)(__v32hf){__h, __h, __h, __h, __h, __h, __h, __h, + __h, __h, __h, __h, __h, __h, __h, __h, + __h, __h, __h, __h, __h, __h, __h, __h, + __h, __h, __h, __h, __h, __h, __h, __h}; +} + +static __inline __m512h __DEFAULT_FN_ATTRS512 +_mm512_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, + _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8, + _Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12, + _Float16 __h13, _Float16 __h14, _Float16 __h15, _Float16 __h16, + _Float16 __h17, _Float16 __h18, _Float16 __h19, _Float16 __h20, + _Float16 __h21, _Float16 __h22, _Float16 __h23, _Float16 __h24, + _Float16 __h25, _Float16 __h26, _Float16 __h27, _Float16 __h28, + _Float16 __h29, _Float16 __h30, _Float16 __h31, _Float16 __h32) { + return (__m512h)(__v32hf){__h32, __h31, __h30, __h29, __h28, __h27, __h26, + __h25, __h24, __h23, __h22, __h21, __h20, __h19, + __h18, __h17, __h16, __h15, __h14, __h13, __h12, + __h11, __h10, __h9, __h8, __h7, __h6, __h5, + __h4, __h3, __h2, __h1}; +} + +#define _mm512_setr_ph(h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, \ + h14, h15, h16, h17, h18, h19, h20, h21, h22, h23, h24, \ + h25, h26, h27, h28, h29, h30, h31, h32) \ + _mm512_set_ph((h32), (h31), (h30), (h29), (h28), (h27), (h26), (h25), (h24), \ + (h23), (h22), (h21), (h20), (h19), (h18), (h17), (h16), (h15), \ + (h14), (h13), (h12), (h11), (h10), (h9), (h8), (h7), (h6), \ + (h5), (h4), (h3), (h2), (h1)) + +static __inline __m512h __DEFAULT_FN_ATTRS512 +_mm512_set1_pch(_Float16 _Complex __h) { + return (__m512h)_mm512_set1_ps(__builtin_bit_cast(float, __h)); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_castph_ps(__m128h __a) { + return (__m128)__a; +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_castph_ps(__m256h __a) { + return (__m256)__a; +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_castph_ps(__m512h __a) { + return (__m512)__a; +} + +static 
__inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_castph_pd(__m128h __a) { + return (__m128d)__a; +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_castph_pd(__m256h __a) { + return (__m256d)__a; +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 _mm512_castph_pd(__m512h __a) { + return (__m512d)__a; +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_castph_si128(__m128h __a) { + return (__m128i)__a; +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_castph_si256(__m256h __a) { + return (__m256i)__a; +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_castph_si512(__m512h __a) { + return (__m512i)__a; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_castps_ph(__m128 __a) { + return (__m128h)__a; +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_castps_ph(__m256 __a) { + return (__m256h)__a; +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_castps_ph(__m512 __a) { + return (__m512h)__a; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_castpd_ph(__m128d __a) { + return (__m128h)__a; +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_castpd_ph(__m256d __a) { + return (__m256h)__a; +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_castpd_ph(__m512d __a) { + return (__m512h)__a; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_castsi128_ph(__m128i __a) { + return (__m128h)__a; +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_castsi256_ph(__m256i __a) { + return (__m256h)__a; +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_castsi512_ph(__m512i __a) { + return (__m512h)__a; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_castph256_ph128(__m256h __a) { + return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 +_mm512_castph512_ph128(__m512h __a) { + return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS512 +_mm512_castph512_ph256(__m512h __a) { + return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_castph128_ph256(__m128h __a) { + return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_castph128_ph512(__m128h __a) { + __m256h __b = __builtin_nondeterministic_value(__b); + return __builtin_shufflevector( + __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), + __b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_castph256_ph512(__m256h __a) { + return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31); +} + +/// Constructs a 256-bit floating-point vector of [16 x half] from a +/// 128-bit floating-point vector of [8 x half]. The lower 128 bits +/// contain the value of the source vector. The upper 384 bits are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x half]. +/// \returns A 512-bit floating-point vector of [16 x half]. 
The lower 128 bits +/// contain the value of the parameter. The upper 384 bits are set to zero. +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_zextph128_ph256(__m128h __a) { + return __builtin_shufflevector(__a, (__v8hf)_mm_setzero_ph(), 0, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +} + +/// Constructs a 512-bit floating-point vector of [32 x half] from a +/// 128-bit floating-point vector of [8 x half]. The lower 128 bits +/// contain the value of the source vector. The upper 384 bits are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x half]. +/// \returns A 512-bit floating-point vector of [32 x half]. The lower 128 bits +/// contain the value of the parameter. The upper 384 bits are set to zero. +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_zextph128_ph512(__m128h __a) { + return __builtin_shufflevector( + __a, (__v8hf)_mm_setzero_ph(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15); +} + +/// Constructs a 512-bit floating-point vector of [32 x half] from a +/// 256-bit floating-point vector of [16 x half]. The lower 256 bits +/// contain the value of the source vector. The upper 256 bits are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit vector of [16 x half]. +/// \returns A 512-bit floating-point vector of [32 x half]. The lower 256 bits +/// contain the value of the parameter. The upper 256 bits are set to zero. +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_zextph256_ph512(__m256h __a) { + return __builtin_shufflevector(__a, (__v16hf)_mm256_setzero_ph(), 0, 1, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31); +} + +#define _mm_comi_round_sh(A, B, P, R) \ + __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, (int)(P), (int)(R)) + +#define _mm_comi_sh(A, B, pred) \ + _mm_comi_round_sh((A), (B), (pred), _MM_FROUND_CUR_DIRECTION) + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comieq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_EQ_OS, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comilt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LT_OS, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comile_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LE_OS, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comigt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GT_OS, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comige_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GE_OS, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comineq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_NEQ_US, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomieq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_EQ_OQ, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomilt_sh(__m128h 
__A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LT_OQ, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomile_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LE_OQ, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomigt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GT_OQ, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomige_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GE_OQ, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomineq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_NEQ_UQ, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_add_ph(__m512h __A, + __m512h __B) { + return (__m512h)((__v32hf)__A + (__v32hf)__B); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_add_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512( + (__mmask32)__U, (__v32hf)_mm512_add_ph(__A, __B), (__v32hf)__W); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_add_ph(__mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U, + (__v32hf)_mm512_add_ph(__A, __B), + (__v32hf)_mm512_setzero_ph()); +} + +#define _mm512_add_round_ph(A, B, R) \ + ((__m512h)__builtin_ia32_addph512((__v32hf)(__m512h)(A), \ + (__v32hf)(__m512h)(B), (int)(R))) + +#define _mm512_mask_add_round_ph(W, U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_add_round_ph((A), (B), (R)), \ + (__v32hf)(__m512h)(W))) + +#define _mm512_maskz_add_round_ph(U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_add_round_ph((A), (B), (R)), \ + (__v32hf)_mm512_setzero_ph())) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_sub_ph(__m512h __A, + __m512h __B) { + return (__m512h)((__v32hf)__A - (__v32hf)__B); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_sub_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512( + (__mmask32)__U, (__v32hf)_mm512_sub_ph(__A, __B), (__v32hf)__W); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_sub_ph(__mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U, + (__v32hf)_mm512_sub_ph(__A, __B), + (__v32hf)_mm512_setzero_ph()); +} + +#define _mm512_sub_round_ph(A, B, R) \ + ((__m512h)__builtin_ia32_subph512((__v32hf)(__m512h)(A), \ + (__v32hf)(__m512h)(B), (int)(R))) + +#define _mm512_mask_sub_round_ph(W, U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_sub_round_ph((A), (B), (R)), \ + (__v32hf)(__m512h)(W))) + +#define _mm512_maskz_sub_round_ph(U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_sub_round_ph((A), (B), (R)), \ + (__v32hf)_mm512_setzero_ph())) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_mul_ph(__m512h __A, + __m512h __B) { + return (__m512h)((__v32hf)__A * (__v32hf)__B); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_mul_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512( + 
(__mmask32)__U, (__v32hf)_mm512_mul_ph(__A, __B), (__v32hf)__W); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_mul_ph(__mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U, + (__v32hf)_mm512_mul_ph(__A, __B), + (__v32hf)_mm512_setzero_ph()); +} + +#define _mm512_mul_round_ph(A, B, R) \ + ((__m512h)__builtin_ia32_mulph512((__v32hf)(__m512h)(A), \ + (__v32hf)(__m512h)(B), (int)(R))) + +#define _mm512_mask_mul_round_ph(W, U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_mul_round_ph((A), (B), (R)), \ + (__v32hf)(__m512h)(W))) + +#define _mm512_maskz_mul_round_ph(U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_mul_round_ph((A), (B), (R)), \ + (__v32hf)_mm512_setzero_ph())) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_div_ph(__m512h __A, + __m512h __B) { + return (__m512h)((__v32hf)__A / (__v32hf)__B); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_div_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512( + (__mmask32)__U, (__v32hf)_mm512_div_ph(__A, __B), (__v32hf)__W); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_div_ph(__mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U, + (__v32hf)_mm512_div_ph(__A, __B), + (__v32hf)_mm512_setzero_ph()); +} + +#define _mm512_div_round_ph(A, B, R) \ + ((__m512h)__builtin_ia32_divph512((__v32hf)(__m512h)(A), \ + (__v32hf)(__m512h)(B), (int)(R))) + +#define _mm512_mask_div_round_ph(W, U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_div_round_ph((A), (B), (R)), \ + (__v32hf)(__m512h)(W))) + +#define _mm512_maskz_div_round_ph(U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_div_round_ph((A), (B), (R)), \ + (__v32hf)_mm512_setzero_ph())) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_min_ph(__m512h __A, + __m512h __B) { + return (__m512h)__builtin_ia32_minph512((__v32hf)__A, (__v32hf)__B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_min_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512( + (__mmask32)__U, (__v32hf)_mm512_min_ph(__A, __B), (__v32hf)__W); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_min_ph(__mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U, + (__v32hf)_mm512_min_ph(__A, __B), + (__v32hf)_mm512_setzero_ph()); +} + +#define _mm512_min_round_ph(A, B, R) \ + ((__m512h)__builtin_ia32_minph512((__v32hf)(__m512h)(A), \ + (__v32hf)(__m512h)(B), (int)(R))) + +#define _mm512_mask_min_round_ph(W, U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_min_round_ph((A), (B), (R)), \ + (__v32hf)(__m512h)(W))) + +#define _mm512_maskz_min_round_ph(U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_min_round_ph((A), (B), (R)), \ + (__v32hf)_mm512_setzero_ph())) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_max_ph(__m512h __A, + __m512h __B) { + return (__m512h)__builtin_ia32_maxph512((__v32hf)__A, (__v32hf)__B, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_max_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) { + return 
(__m512h)__builtin_ia32_selectph_512( + (__mmask32)__U, (__v32hf)_mm512_max_ph(__A, __B), (__v32hf)__W); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_max_ph(__mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U, + (__v32hf)_mm512_max_ph(__A, __B), + (__v32hf)_mm512_setzero_ph()); +} + +#define _mm512_max_round_ph(A, B, R) \ + ((__m512h)__builtin_ia32_maxph512((__v32hf)(__m512h)(A), \ + (__v32hf)(__m512h)(B), (int)(R))) + +#define _mm512_mask_max_round_ph(W, U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_max_round_ph((A), (B), (R)), \ + (__v32hf)(__m512h)(W))) + +#define _mm512_maskz_max_round_ph(U, A, B, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_max_round_ph((A), (B), (R)), \ + (__v32hf)_mm512_setzero_ph())) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_abs_ph(__m512h __A) { + return (__m512h)_mm512_and_epi32(_mm512_set1_epi32(0x7FFF7FFF), (__m512i)__A); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_conj_pch(__m512h __A) { + return (__m512h)_mm512_xor_ps((__m512)__A, _mm512_set1_ps(-0.0f)); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_conj_pch(__m512h __W, __mmask16 __U, __m512h __A) { + return (__m512h)__builtin_ia32_selectps_512( + (__mmask16)__U, (__v16sf)_mm512_conj_pch(__A), (__v16sf)__W); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_conj_pch(__mmask16 __U, __m512h __A) { + return (__m512h)__builtin_ia32_selectps_512((__mmask16)__U, + (__v16sf)_mm512_conj_pch(__A), + (__v16sf)_mm512_setzero_ps()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_add_sh(__m128h __A, + __m128h __B) { + __A[0] += __B[0]; + return __A; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_add_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + __A = _mm_add_sh(__A, __B); + return __builtin_ia32_selectsh_128(__U, __A, __W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_add_sh(__mmask8 __U, + __m128h __A, + __m128h __B) { + __A = _mm_add_sh(__A, __B); + return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph()); +} + +#define _mm_add_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_addsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_add_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_addsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_add_round_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_addsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sub_sh(__m128h __A, + __m128h __B) { + __A[0] -= __B[0]; + return __A; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sub_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + __A = _mm_sub_sh(__A, __B); + return __builtin_ia32_selectsh_128(__U, __A, __W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sub_sh(__mmask8 __U, + __m128h __A, + __m128h __B) { + __A = _mm_sub_sh(__A, __B); + return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph()); +} + +#define _mm_sub_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_subsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(R))) + +#define 
_mm_mask_sub_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_subsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_sub_round_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_subsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mul_sh(__m128h __A, + __m128h __B) { + __A[0] *= __B[0]; + return __A; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_mul_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + __A = _mm_mul_sh(__A, __B); + return __builtin_ia32_selectsh_128(__U, __A, __W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_mul_sh(__mmask8 __U, + __m128h __A, + __m128h __B) { + __A = _mm_mul_sh(__A, __B); + return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph()); +} + +#define _mm_mul_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_mulsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_mul_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_mulsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_mul_round_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_mulsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_div_sh(__m128h __A, + __m128h __B) { + __A[0] /= __B[0]; + return __A; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_div_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + __A = _mm_div_sh(__A, __B); + return __builtin_ia32_selectsh_128(__U, __A, __W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_div_sh(__mmask8 __U, + __m128h __A, + __m128h __B) { + __A = _mm_div_sh(__A, __B); + return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph()); +} + +#define _mm_div_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_divsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_div_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_divsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_div_round_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_divsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_min_sh(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_minsh_round_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_min_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_minsh_round_mask((__v8hf)__A, (__v8hf)__B, + (__v8hf)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_min_sh(__mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_minsh_round_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_min_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_minsh_round_mask( \ + (__v8hf)(__m128h)(A), 
(__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_min_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_minsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_min_round_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_minsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_max_sh(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_maxsh_round_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_max_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_maxsh_round_mask((__v8hf)__A, (__v8hf)__B, + (__v8hf)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_max_sh(__mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_maxsh_round_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_max_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_maxsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_max_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_maxsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_max_round_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_maxsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_cmp_round_ph_mask(A, B, P, R) \ + ((__mmask32)__builtin_ia32_cmpph512_mask((__v32hf)(__m512h)(A), \ + (__v32hf)(__m512h)(B), (int)(P), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_cmp_round_ph_mask(U, A, B, P, R) \ + ((__mmask32)__builtin_ia32_cmpph512_mask((__v32hf)(__m512h)(A), \ + (__v32hf)(__m512h)(B), (int)(P), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_cmp_ph_mask(A, B, P) \ + _mm512_cmp_round_ph_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_cmp_ph_mask(U, A, B, P) \ + _mm512_mask_cmp_round_ph_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm_cmp_round_sh_mask(X, Y, P, R) \ + ((__mmask8)__builtin_ia32_cmpsh_mask((__v8hf)(__m128h)(X), \ + (__v8hf)(__m128h)(Y), (int)(P), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_cmp_round_sh_mask(M, X, Y, P, R) \ + ((__mmask8)__builtin_ia32_cmpsh_mask((__v8hf)(__m128h)(X), \ + (__v8hf)(__m128h)(Y), (int)(P), \ + (__mmask8)(M), (int)(R))) + +#define _mm_cmp_sh_mask(X, Y, P) \ + ((__mmask8)__builtin_ia32_cmpsh_mask( \ + (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), (int)(P), (__mmask8)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_cmp_sh_mask(M, X, Y, P) \ + ((__mmask8)__builtin_ia32_cmpsh_mask( \ + (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), (int)(P), (__mmask8)(M), \ + _MM_FROUND_CUR_DIRECTION)) +// loads with vmovsh: +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_load_sh(void const *__dp) { + struct __mm_load_sh_struct { + _Float16 __u; + } __attribute__((__packed__, __may_alias__)); + _Float16 __u = ((const struct __mm_load_sh_struct *)__dp)->__u; + return (__m128h){__u, 0, 0, 0, 0, 0, 0, 0}; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_load_sh(__m128h 
__W, __mmask8 __U, const void *__A) { + __m128h src = (__v8hf)__builtin_shufflevector( + (__v8hf)__W, (__v8hf)_mm_setzero_ph(), 0, 8, 8, 8, 8, 8, 8, 8); + + return (__m128h)__builtin_ia32_loadsh128_mask((const __v8hf *)__A, src, __U & 1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_load_sh(__mmask8 __U, const void *__A) { + return (__m128h)__builtin_ia32_loadsh128_mask( + (const __v8hf *)__A, (__v8hf)_mm_setzero_ph(), __U & 1); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_load_ph(void const *__p) { + return *(const __m512h *)__p; +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_load_ph(void const *__p) { + return *(const __m256h *)__p; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_load_ph(void const *__p) { + return *(const __m128h *)__p; +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_loadu_ph(void const *__p) { + struct __loadu_ph { + __m512h_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ph *)__p)->__v; +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_loadu_ph(void const *__p) { + struct __loadu_ph { + __m256h_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ph *)__p)->__v; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_loadu_ph(void const *__p) { + struct __loadu_ph { + __m128h_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ph *)__p)->__v; +} + +// stores with vmovsh: +static __inline__ void __DEFAULT_FN_ATTRS128 _mm_store_sh(void *__dp, + __m128h __a) { + struct __mm_store_sh_struct { + _Float16 __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_store_sh_struct *)__dp)->__u = __a[0]; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_sh(void *__W, + __mmask8 __U, + __m128h __A) { + __builtin_ia32_storesh128_mask((__v8hf *)__W, __A, __U & 1); +} + +static __inline__ void __DEFAULT_FN_ATTRS512 _mm512_store_ph(void *__P, + __m512h __A) { + *(__m512h *)__P = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_store_ph(void *__P, + __m256h __A) { + *(__m256h *)__P = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 _mm_store_ph(void *__P, + __m128h __A) { + *(__m128h *)__P = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS512 _mm512_storeu_ph(void *__P, + __m512h __A) { + struct __storeu_ph { + __m512h_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ph *)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_storeu_ph(void *__P, + __m256h __A) { + struct __storeu_ph { + __m256h_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ph *)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 _mm_storeu_ph(void *__P, + __m128h __A) { + struct __storeu_ph { + __m128h_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ph *)__P)->__v = __A; +} + +// moves with vmovsh: +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_move_sh(__m128h __a, + __m128h __b) { + __a[0] = __b[0]; + return __a; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_move_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return __builtin_ia32_selectsh_128(__U, _mm_move_sh(__A, __B), __W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_move_sh(__mmask8 __U, + __m128h __A, + __m128h __B) { + return __builtin_ia32_selectsh_128(__U, _mm_move_sh(__A, __B), + _mm_setzero_ph()); +} + +// vmovw: +static __inline__ 
__m128i __DEFAULT_FN_ATTRS128 _mm_cvtsi16_si128(short __a) { + return (__m128i)(__v8hi){__a, 0, 0, 0, 0, 0, 0, 0}; +} + +static __inline__ short __DEFAULT_FN_ATTRS128 _mm_cvtsi128_si16(__m128i __a) { + __v8hi __b = (__v8hi)__a; + return __b[0]; +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_rcp_ph(__m512h __A) { + return (__m512h)__builtin_ia32_rcpph512_mask( + (__v32hf)__A, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_rcp_ph(__m512h __W, __mmask32 __U, __m512h __A) { + return (__m512h)__builtin_ia32_rcpph512_mask((__v32hf)__A, (__v32hf)__W, + (__mmask32)__U); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_rcp_ph(__mmask32 __U, __m512h __A) { + return (__m512h)__builtin_ia32_rcpph512_mask( + (__v32hf)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_rsqrt_ph(__m512h __A) { + return (__m512h)__builtin_ia32_rsqrtph512_mask( + (__v32hf)__A, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_rsqrt_ph(__m512h __W, __mmask32 __U, __m512h __A) { + return (__m512h)__builtin_ia32_rsqrtph512_mask((__v32hf)__A, (__v32hf)__W, + (__mmask32)__U); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_rsqrt_ph(__mmask32 __U, __m512h __A) { + return (__m512h)__builtin_ia32_rsqrtph512_mask( + (__v32hf)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U); +} + +#define _mm512_getmant_ph(A, B, C) \ + ((__m512h)__builtin_ia32_getmantph512_mask( \ + (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), \ + (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_getmant_ph(W, U, A, B, C) \ + ((__m512h)__builtin_ia32_getmantph512_mask( \ + (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), (__v32hf)(__m512h)(W), \ + (__mmask32)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_getmant_ph(U, A, B, C) \ + ((__m512h)__builtin_ia32_getmantph512_mask( \ + (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), \ + (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_getmant_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_getmantph512_mask( \ + (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), \ + (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, (int)(R))) + +#define _mm512_mask_getmant_round_ph(W, U, A, B, C, R) \ + ((__m512h)__builtin_ia32_getmantph512_mask( \ + (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), (__v32hf)(__m512h)(W), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_getmant_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_getmantph512_mask( \ + (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), \ + (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_getexp_ph(__m512h __A) { + return (__m512h)__builtin_ia32_getexpph512_mask( + (__v32hf)__A, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_getexp_ph(__m512h __W, __mmask32 __U, __m512h __A) { + return (__m512h)__builtin_ia32_getexpph512_mask( + (__v32hf)__A, (__v32hf)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_getexp_ph(__mmask32 __U, __m512h __A) { + return (__m512h)__builtin_ia32_getexpph512_mask( + (__v32hf)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define 
_mm512_getexp_round_ph(A, R) \ + ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \ + (__v32hf)_mm512_undefined_ph(), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_getexp_round_ph(W, U, A, R) \ + ((__m512h)__builtin_ia32_getexpph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(W), (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_getexp_round_ph(U, A, R) \ + ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \ + (__v32hf)_mm512_setzero_ph(), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_scalef_ph(__m512h __A, + __m512h __B) { + return (__m512h)__builtin_ia32_scalefph512_mask( + (__v32hf)__A, (__v32hf)__B, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_scalef_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_scalefph512_mask((__v32hf)__A, (__v32hf)__B, + (__v32hf)__W, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_scalef_ph(__mmask32 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_scalefph512_mask( + (__v32hf)__A, (__v32hf)__B, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_scalef_round_ph(A, B, R) \ + ((__m512h)__builtin_ia32_scalefph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), \ + (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, (int)(R))) + +#define _mm512_mask_scalef_round_ph(W, U, A, B, R) \ + ((__m512h)__builtin_ia32_scalefph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(W), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_scalef_round_ph(U, A, B, R) \ + ((__m512h)__builtin_ia32_scalefph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), \ + (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R))) + +#define _mm512_roundscale_ph(A, B) \ + ((__m512h)__builtin_ia32_rndscaleph_mask( \ + (__v32hf)(__m512h)(A), (int)(B), (__v32hf)(__m512h)(A), (__mmask32)-1, \ + _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_roundscale_ph(A, B, C, imm) \ + ((__m512h)__builtin_ia32_rndscaleph_mask( \ + (__v32hf)(__m512h)(C), (int)(imm), (__v32hf)(__m512h)(A), \ + (__mmask32)(B), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_maskz_roundscale_ph(A, B, imm) \ + ((__m512h)__builtin_ia32_rndscaleph_mask( \ + (__v32hf)(__m512h)(B), (int)(imm), (__v32hf)_mm512_setzero_ph(), \ + (__mmask32)(A), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_roundscale_round_ph(A, B, C, imm, R) \ + ((__m512h)__builtin_ia32_rndscaleph_mask((__v32hf)(__m512h)(C), (int)(imm), \ + (__v32hf)(__m512h)(A), \ + (__mmask32)(B), (int)(R))) + +#define _mm512_maskz_roundscale_round_ph(A, B, imm, R) \ + ((__m512h)__builtin_ia32_rndscaleph_mask((__v32hf)(__m512h)(B), (int)(imm), \ + (__v32hf)_mm512_setzero_ph(), \ + (__mmask32)(A), (int)(R))) + +#define _mm512_roundscale_round_ph(A, imm, R) \ + ((__m512h)__builtin_ia32_rndscaleph_mask((__v32hf)(__m512h)(A), (int)(imm), \ + (__v32hf)_mm512_undefined_ph(), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_reduce_ph(A, imm) \ + ((__m512h)__builtin_ia32_reduceph512_mask( \ + (__v32hf)(__m512h)(A), (int)(imm), (__v32hf)_mm512_undefined_ph(), \ + (__mmask32)-1, _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_reduce_ph(W, U, A, imm) \ + ((__m512h)__builtin_ia32_reduceph512_mask( \ + (__v32hf)(__m512h)(A), (int)(imm), (__v32hf)(__m512h)(W), \ + (__mmask32)(U), _MM_FROUND_CUR_DIRECTION)) + 
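/*
 * Editor's note -- illustrative sketch only, not part of the upstream
 * header. Every mask/maskz form above follows the same pattern: compute
 * the full unmasked result, then blend it against the write-through
 * operand (or zero) under the mask, either through
 * __builtin_ia32_selectph_512 or through the builtin's own mask and
 * write-through arguments. A hypothetical caller of the roundscale
 * family could look like this (function name and mask value are made up
 * for illustration):
 *
 *   // Round lanes 0..15 of x to the nearest integral value (imm == 0);
 *   // lanes 16..31 are passed through unchanged from src.
 *   static inline __m512h round_low_half(__m512h x, __m512h src) {
 *     return _mm512_mask_roundscale_ph(src, (__mmask32)0x0000FFFF, x, 0);
 *   }
 */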
+#define _mm512_maskz_reduce_ph(U, A, imm) \ + ((__m512h)__builtin_ia32_reduceph512_mask( \ + (__v32hf)(__m512h)(A), (int)(imm), (__v32hf)_mm512_setzero_ph(), \ + (__mmask32)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm512_mask_reduce_round_ph(W, U, A, imm, R) \ + ((__m512h)__builtin_ia32_reduceph512_mask((__v32hf)(__m512h)(A), (int)(imm), \ + (__v32hf)(__m512h)(W), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_reduce_round_ph(U, A, imm, R) \ + ((__m512h)__builtin_ia32_reduceph512_mask((__v32hf)(__m512h)(A), (int)(imm), \ + (__v32hf)_mm512_setzero_ph(), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_reduce_round_ph(A, imm, R) \ + ((__m512h)__builtin_ia32_reduceph512_mask((__v32hf)(__m512h)(A), (int)(imm), \ + (__v32hf)_mm512_undefined_ph(), \ + (__mmask32)-1, (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rcp_sh(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_rcpsh_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rcp_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_rcpsh_mask((__v8hf)__A, (__v8hf)__B, + (__v8hf)__W, (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_rcp_sh(__mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_rcpsh_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rsqrt_sh(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_rsqrtsh_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_rsqrtsh_mask((__v8hf)__A, (__v8hf)__B, + (__v8hf)__W, (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_rsqrt_sh(__mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_rsqrtsh_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +#define _mm_getmant_round_sh(A, B, C, D, R) \ + ((__m128h)__builtin_ia32_getmantsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \ + (__v8hf)_mm_setzero_ph(), (__mmask8)-1, (int)(R))) + +#define _mm_getmant_sh(A, B, C, D) \ + ((__m128h)__builtin_ia32_getmantsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \ + (__v8hf)_mm_setzero_ph(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getmant_sh(W, U, A, B, C, D) \ + ((__m128h)__builtin_ia32_getmantsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \ + (__v8hf)(__m128h)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_getmant_round_sh(W, U, A, B, C, D, R) \ + ((__m128h)__builtin_ia32_getmantsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \ + (__v8hf)(__m128h)(W), (__mmask8)(U), (int)(R))) + +#define _mm_maskz_getmant_sh(U, A, B, C, D) \ + ((__m128h)__builtin_ia32_getmantsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \ + (__v8hf)_mm_setzero_ph(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_getmant_round_sh(U, A, B, C, D, R) \ + ((__m128h)__builtin_ia32_getmantsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \ + (__v8hf)_mm_setzero_ph(), (__mmask8)(U), 
(int)(R))) + +#define _mm_getexp_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_getexpsh128_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_getexp_sh(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_getexpsh128_round_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_getexpsh128_round_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_getexp_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_getexpsh128_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_sh(__mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_getexpsh128_round_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_getexp_round_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_getexpsh128_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +#define _mm_scalef_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_scalefsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_scalef_sh(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_scalefsh_round_mask( + (__v8hf)__A, (__v8hf)(__B), (__v8hf)_mm_setzero_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_scalefsh_round_mask((__v8hf)__A, (__v8hf)__B, + (__v8hf)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask_scalef_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_scalefsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_sh(__mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_scalefsh_round_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_scalef_round_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_scalefsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +#define _mm_roundscale_round_sh(A, B, imm, R) \ + ((__m128h)__builtin_ia32_rndscalesh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(imm), (int)(R))) + +#define _mm_roundscale_sh(A, B, imm) \ + ((__m128h)__builtin_ia32_rndscalesh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(imm), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_roundscale_sh(W, U, A, B, I) \ + ((__m128h)__builtin_ia32_rndscalesh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(I), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_roundscale_round_sh(W, U, A, B, I, R) \ + 
((__m128h)__builtin_ia32_rndscalesh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(I), (int)(R))) + +#define _mm_maskz_roundscale_sh(U, A, B, I) \ + ((__m128h)__builtin_ia32_rndscalesh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(I), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_roundscale_round_sh(U, A, B, I, R) \ + ((__m128h)__builtin_ia32_rndscalesh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(I), (int)(R))) + +#define _mm_reduce_sh(A, B, C) \ + ((__m128h)__builtin_ia32_reducesh_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(C), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_mask_reduce_sh(W, U, A, B, C) \ + ((__m128h)__builtin_ia32_reducesh_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(C), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_maskz_reduce_sh(U, A, B, C) \ + ((__m128h)__builtin_ia32_reducesh_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(C), _MM_FROUND_CUR_DIRECTION)) + +#define _mm_reduce_round_sh(A, B, C, R) \ + ((__m128h)__builtin_ia32_reducesh_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(C), (int)(R))) + +#define _mm_mask_reduce_round_sh(W, U, A, B, C, R) \ + ((__m128h)__builtin_ia32_reducesh_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(C), (int)(R))) + +#define _mm_maskz_reduce_round_sh(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_reducesh_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(C), (int)(R))) + +#define _mm512_sqrt_round_ph(A, R) \ + ((__m512h)__builtin_ia32_sqrtph512((__v32hf)(__m512h)(A), (int)(R))) + +#define _mm512_mask_sqrt_round_ph(W, U, A, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_sqrt_round_ph((A), (R)), \ + (__v32hf)(__m512h)(W))) + +#define _mm512_maskz_sqrt_round_ph(U, A, R) \ + ((__m512h)__builtin_ia32_selectph_512( \ + (__mmask32)(U), (__v32hf)_mm512_sqrt_round_ph((A), (R)), \ + (__v32hf)_mm512_setzero_ph())) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_sqrt_ph(__m512h __A) { + return (__m512h)__builtin_ia32_sqrtph512((__v32hf)__A, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_sqrt_ph(__m512h __W, __mmask32 __U, __m512h __A) { + return (__m512h)__builtin_ia32_selectph_512( + (__mmask32)(__U), + (__v32hf)__builtin_ia32_sqrtph512((__A), (_MM_FROUND_CUR_DIRECTION)), + (__v32hf)(__m512h)(__W)); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_sqrt_ph(__mmask32 __U, __m512h __A) { + return (__m512h)__builtin_ia32_selectph_512( + (__mmask32)(__U), + (__v32hf)__builtin_ia32_sqrtph512((__A), (_MM_FROUND_CUR_DIRECTION)), + (__v32hf)_mm512_setzero_ph()); +} + +#define _mm_sqrt_round_sh(A, B, R) \ + ((__m128h)__builtin_ia32_sqrtsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_sqrt_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_sqrtsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_sqrt_round_sh(U, A, B, R) \ + 
((__m128h)__builtin_ia32_sqrtsh_round_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sqrt_sh(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_sqrtsh_round_mask( + (__v8hf)(__m128h)(__A), (__v8hf)(__m128h)(__B), (__v8hf)_mm_setzero_ph(), + (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_sh(__m128h __W, + __mmask32 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_sqrtsh_round_mask( + (__v8hf)(__m128h)(__A), (__v8hf)(__m128h)(__B), (__v8hf)(__m128h)(__W), + (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_sh(__mmask32 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_sqrtsh_round_mask( + (__v8hf)(__m128h)(__A), (__v8hf)(__m128h)(__B), (__v8hf)_mm_setzero_ph(), + (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fpclass_ph_mask(U, A, imm) \ + ((__mmask32)__builtin_ia32_fpclassph512_mask((__v32hf)(__m512h)(A), \ + (int)(imm), (__mmask32)(U))) + +#define _mm512_fpclass_ph_mask(A, imm) \ + ((__mmask32)__builtin_ia32_fpclassph512_mask((__v32hf)(__m512h)(A), \ + (int)(imm), (__mmask32)-1)) + +#define _mm_fpclass_sh_mask(A, imm) \ + ((__mmask8)__builtin_ia32_fpclasssh_mask((__v8hf)(__m128h)(A), (int)(imm), \ + (__mmask8)-1)) + +#define _mm_mask_fpclass_sh_mask(U, A, imm) \ + ((__mmask8)__builtin_ia32_fpclasssh_mask((__v8hf)(__m128h)(A), (int)(imm), \ + (__mmask8)(U))) + +#define _mm512_cvt_roundpd_ph(A, R) \ + ((__m128h)__builtin_ia32_vcvtpd2ph512_mask( \ + (__v8df)(A), (__v8hf)_mm_undefined_ph(), (__mmask8)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundpd_ph(W, U, A, R) \ + ((__m128h)__builtin_ia32_vcvtpd2ph512_mask((__v8df)(A), (__v8hf)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundpd_ph(U, A, R) \ + ((__m128h)__builtin_ia32_vcvtpd2ph512_mask( \ + (__v8df)(A), (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 _mm512_cvtpd_ph(__m512d __A) { + return (__m128h)__builtin_ia32_vcvtpd2ph512_mask( + (__v8df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtpd_ph(__m128h __W, __mmask8 __U, __m512d __A) { + return (__m128h)__builtin_ia32_vcvtpd2ph512_mask( + (__v8df)__A, (__v8hf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtpd_ph(__mmask8 __U, __m512d __A) { + return (__m128h)__builtin_ia32_vcvtpd2ph512_mask( + (__v8df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundph_pd(A, R) \ + ((__m512d)__builtin_ia32_vcvtph2pd512_mask( \ + (__v8hf)(A), (__v8df)_mm512_undefined_pd(), (__mmask8)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundph_pd(W, U, A, R) \ + ((__m512d)__builtin_ia32_vcvtph2pd512_mask((__v8hf)(A), (__v8df)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundph_pd(U, A, R) \ + ((__m512d)__builtin_ia32_vcvtph2pd512_mask( \ + (__v8hf)(A), (__v8df)_mm512_setzero_pd(), (__mmask8)(U), (int)(R))) + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 _mm512_cvtph_pd(__m128h __A) { + return (__m512d)__builtin_ia32_vcvtph2pd512_mask( + (__v8hf)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 
+_mm512_mask_cvtph_pd(__m512d __W, __mmask8 __U, __m128h __A) { + return (__m512d)__builtin_ia32_vcvtph2pd512_mask( + (__v8hf)__A, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512d __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtph_pd(__mmask8 __U, __m128h __A) { + return (__m512d)__builtin_ia32_vcvtph2pd512_mask( + (__v8hf)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_cvt_roundsh_ss(A, B, R) \ + ((__m128)__builtin_ia32_vcvtsh2ss_round_mask((__v4sf)(A), (__v8hf)(B), \ + (__v4sf)_mm_undefined_ps(), \ + (__mmask8)(-1), (int)(R))) + +#define _mm_mask_cvt_roundsh_ss(W, U, A, B, R) \ + ((__m128)__builtin_ia32_vcvtsh2ss_round_mask( \ + (__v4sf)(A), (__v8hf)(B), (__v4sf)(W), (__mmask8)(U), (int)(R))) + +#define _mm_maskz_cvt_roundsh_ss(U, A, B, R) \ + ((__m128)__builtin_ia32_vcvtsh2ss_round_mask((__v4sf)(A), (__v8hf)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtsh_ss(__m128 __A, + __m128h __B) { + return (__m128)__builtin_ia32_vcvtsh2ss_round_mask( + (__v4sf)__A, (__v8hf)__B, (__v4sf)_mm_undefined_ps(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtsh_ss(__m128 __W, + __mmask8 __U, + __m128 __A, + __m128h __B) { + return (__m128)__builtin_ia32_vcvtsh2ss_round_mask((__v4sf)__A, (__v8hf)__B, + (__v4sf)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsh_ss(__mmask8 __U, + __m128 __A, + __m128h __B) { + return (__m128)__builtin_ia32_vcvtsh2ss_round_mask( + (__v4sf)__A, (__v8hf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_cvt_roundss_sh(A, B, R) \ + ((__m128h)__builtin_ia32_vcvtss2sh_round_mask((__v8hf)(A), (__v4sf)(B), \ + (__v8hf)_mm_undefined_ph(), \ + (__mmask8)(-1), (int)(R))) + +#define _mm_mask_cvt_roundss_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vcvtss2sh_round_mask( \ + (__v8hf)(A), (__v4sf)(B), (__v8hf)(W), (__mmask8)(U), (int)(R))) + +#define _mm_maskz_cvt_roundss_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_vcvtss2sh_round_mask((__v8hf)(A), (__v4sf)(B), \ + (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtss_sh(__m128h __A, + __m128 __B) { + return (__m128h)__builtin_ia32_vcvtss2sh_round_mask( + (__v8hf)__A, (__v4sf)__B, (__v8hf)_mm_undefined_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtss_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128 __B) { + return (__m128h)__builtin_ia32_vcvtss2sh_round_mask( + (__v8hf)__A, (__v4sf)__B, (__v8hf)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_cvtss_sh(__mmask8 __U, + __m128h __A, + __m128 __B) { + return (__m128h)__builtin_ia32_vcvtss2sh_round_mask( + (__v8hf)__A, (__v4sf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_cvt_roundsd_sh(A, B, R) \ + ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B), \ + (__v8hf)_mm_undefined_ph(), \ + (__mmask8)(-1), (int)(R))) + +#define _mm_mask_cvt_roundsd_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask( \ + (__v8hf)(A), (__v2df)(B), (__v8hf)(W), (__mmask8)(U), (int)(R))) + +#define _mm_maskz_cvt_roundsd_sh(U, A, B, R) \ + ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B), \ + 
(__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtsd_sh(__m128h __A, + __m128d __B) { + return (__m128h)__builtin_ia32_vcvtsd2sh_round_mask( + (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_undefined_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtsd_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128d __B) { + return (__m128h)__builtin_ia32_vcvtsd2sh_round_mask( + (__v8hf)__A, (__v2df)__B, (__v8hf)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsd_sh(__mmask8 __U, __m128h __A, __m128d __B) { + return (__m128h)__builtin_ia32_vcvtsd2sh_round_mask( + (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_cvt_roundsh_sd(A, B, R) \ + ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask((__v2df)(A), (__v8hf)(B), \ + (__v2df)_mm_undefined_pd(), \ + (__mmask8)(-1), (int)(R))) + +#define _mm_mask_cvt_roundsh_sd(W, U, A, B, R) \ + ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask( \ + (__v2df)(A), (__v8hf)(B), (__v2df)(W), (__mmask8)(U), (int)(R))) + +#define _mm_maskz_cvt_roundsh_sd(U, A, B, R) \ + ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask((__v2df)(A), (__v8hf)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtsh_sd(__m128d __A, + __m128h __B) { + return (__m128d)__builtin_ia32_vcvtsh2sd_round_mask( + (__v2df)__A, (__v8hf)__B, (__v2df)_mm_undefined_pd(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtsh_sd(__m128d __W, + __mmask8 __U, + __m128d __A, + __m128h __B) { + return (__m128d)__builtin_ia32_vcvtsh2sd_round_mask( + (__v2df)__A, (__v8hf)__B, (__v2df)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsh_sd(__mmask8 __U, __m128d __A, __m128h __B) { + return (__m128d)__builtin_ia32_vcvtsh2sd_round_mask( + (__v2df)__A, (__v8hf)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundph_epi16(A, R) \ + ((__m512i)__builtin_ia32_vcvtph2w512_mask((__v32hf)(A), \ + (__v32hi)_mm512_undefined_epi32(), \ + (__mmask32)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundph_epi16(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2w512_mask((__v32hf)(A), (__v32hi)(W), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundph_epi16(U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2w512_mask((__v32hf)(A), \ + (__v32hi)_mm512_setzero_epi32(), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtph_epi16(__m512h __A) { + return (__m512i)__builtin_ia32_vcvtph2w512_mask( + (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtph_epi16(__m512i __W, __mmask32 __U, __m512h __A) { + return (__m512i)__builtin_ia32_vcvtph2w512_mask( + (__v32hf)__A, (__v32hi)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtph_epi16(__mmask32 __U, __m512h __A) { + return (__m512i)__builtin_ia32_vcvtph2w512_mask( + (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundph_epi16(A, R) \ + ((__m512i)__builtin_ia32_vcvttph2w512_mask( \ + (__v32hf)(A), 
(__v32hi)_mm512_undefined_epi32(), (__mmask32)(-1), \ + (int)(R))) + +#define _mm512_mask_cvtt_roundph_epi16(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2w512_mask((__v32hf)(A), (__v32hi)(W), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundph_epi16(U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2w512_mask((__v32hf)(A), \ + (__v32hi)_mm512_setzero_epi32(), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttph_epi16(__m512h __A) { + return (__m512i)__builtin_ia32_vcvttph2w512_mask( + (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttph_epi16(__m512i __W, __mmask32 __U, __m512h __A) { + return (__m512i)__builtin_ia32_vcvttph2w512_mask( + (__v32hf)__A, (__v32hi)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttph_epi16(__mmask32 __U, __m512h __A) { + return (__m512i)__builtin_ia32_vcvttph2w512_mask( + (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepi16_ph(A, R) \ + ((__m512h)__builtin_ia32_vcvtw2ph512_mask((__v32hi)(A), \ + (__v32hf)_mm512_undefined_ph(), \ + (__mmask32)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundepi16_ph(W, U, A, R) \ + ((__m512h)__builtin_ia32_vcvtw2ph512_mask((__v32hi)(A), (__v32hf)(W), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundepi16_ph(U, A, R) \ + ((__m512h)__builtin_ia32_vcvtw2ph512_mask( \ + (__v32hi)(A), (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_cvtepi16_ph(__m512i __A) { + return (__m512h)__builtin_ia32_vcvtw2ph512_mask( + (__v32hi)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi16_ph(__m512h __W, __mmask32 __U, __m512i __A) { + return (__m512h)__builtin_ia32_vcvtw2ph512_mask( + (__v32hi)__A, (__v32hf)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi16_ph(__mmask32 __U, __m512i __A) { + return (__m512h)__builtin_ia32_vcvtw2ph512_mask( + (__v32hi)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundph_epu16(A, R) \ + ((__m512i)__builtin_ia32_vcvtph2uw512_mask( \ + (__v32hf)(A), (__v32hu)_mm512_undefined_epi32(), (__mmask32)(-1), \ + (int)(R))) + +#define _mm512_mask_cvt_roundph_epu16(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2uw512_mask((__v32hf)(A), (__v32hu)(W), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundph_epu16(U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2uw512_mask((__v32hf)(A), \ + (__v32hu)_mm512_setzero_epi32(), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtph_epu16(__m512h __A) { + return (__m512i)__builtin_ia32_vcvtph2uw512_mask( + (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtph_epu16(__m512i __W, __mmask32 __U, __m512h __A) { + return (__m512i)__builtin_ia32_vcvtph2uw512_mask( + (__v32hf)__A, (__v32hu)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtph_epu16(__mmask32 __U, __m512h __A) { + return (__m512i)__builtin_ia32_vcvtph2uw512_mask( + (__v32hf)__A, 
(__v32hu)_mm512_setzero_epi32(), (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundph_epu16(A, R) \ + ((__m512i)__builtin_ia32_vcvttph2uw512_mask( \ + (__v32hf)(A), (__v32hu)_mm512_undefined_epi32(), (__mmask32)(-1), \ + (int)(R))) + +#define _mm512_mask_cvtt_roundph_epu16(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2uw512_mask((__v32hf)(A), (__v32hu)(W), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundph_epu16(U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2uw512_mask((__v32hf)(A), \ + (__v32hu)_mm512_setzero_epi32(), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttph_epu16(__m512h __A) { + return (__m512i)__builtin_ia32_vcvttph2uw512_mask( + (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttph_epu16(__m512i __W, __mmask32 __U, __m512h __A) { + return (__m512i)__builtin_ia32_vcvttph2uw512_mask( + (__v32hf)__A, (__v32hu)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttph_epu16(__mmask32 __U, __m512h __A) { + return (__m512i)__builtin_ia32_vcvttph2uw512_mask( + (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepu16_ph(A, R) \ + ((__m512h)__builtin_ia32_vcvtuw2ph512_mask((__v32hu)(A), \ + (__v32hf)_mm512_undefined_ph(), \ + (__mmask32)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundepu16_ph(W, U, A, R) \ + ((__m512h)__builtin_ia32_vcvtuw2ph512_mask((__v32hu)(A), (__v32hf)(W), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundepu16_ph(U, A, R) \ + ((__m512h)__builtin_ia32_vcvtuw2ph512_mask( \ + (__v32hu)(A), (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_cvtepu16_ph(__m512i __A) { + return (__m512h)__builtin_ia32_vcvtuw2ph512_mask( + (__v32hu)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu16_ph(__m512h __W, __mmask32 __U, __m512i __A) { + return (__m512h)__builtin_ia32_vcvtuw2ph512_mask( + (__v32hu)__A, (__v32hf)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu16_ph(__mmask32 __U, __m512i __A) { + return (__m512h)__builtin_ia32_vcvtuw2ph512_mask( + (__v32hu)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundph_epi32(A, R) \ + ((__m512i)__builtin_ia32_vcvtph2dq512_mask( \ + (__v16hf)(A), (__v16si)_mm512_undefined_epi32(), (__mmask16)(-1), \ + (int)(R))) + +#define _mm512_mask_cvt_roundph_epi32(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2dq512_mask((__v16hf)(A), (__v16si)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundph_epi32(U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2dq512_mask((__v16hf)(A), \ + (__v16si)_mm512_setzero_epi32(), \ + (__mmask16)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtph_epi32(__m256h __A) { + return (__m512i)__builtin_ia32_vcvtph2dq512_mask( + (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtph_epi32(__m512i __W, __mmask16 __U, __m256h __A) { + return (__m512i)__builtin_ia32_vcvtph2dq512_mask( + (__v16hf)__A, (__v16si)__W, (__mmask16)__U, 
_MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtph_epi32(__mmask16 __U, __m256h __A) { + return (__m512i)__builtin_ia32_vcvtph2dq512_mask( + (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundph_epu32(A, R) \ + ((__m512i)__builtin_ia32_vcvtph2udq512_mask( \ + (__v16hf)(A), (__v16su)_mm512_undefined_epi32(), (__mmask16)(-1), \ + (int)(R))) + +#define _mm512_mask_cvt_roundph_epu32(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2udq512_mask((__v16hf)(A), (__v16su)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundph_epu32(U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2udq512_mask((__v16hf)(A), \ + (__v16su)_mm512_setzero_epi32(), \ + (__mmask16)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtph_epu32(__m256h __A) { + return (__m512i)__builtin_ia32_vcvtph2udq512_mask( + (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtph_epu32(__m512i __W, __mmask16 __U, __m256h __A) { + return (__m512i)__builtin_ia32_vcvtph2udq512_mask( + (__v16hf)__A, (__v16su)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtph_epu32(__mmask16 __U, __m256h __A) { + return (__m512i)__builtin_ia32_vcvtph2udq512_mask( + (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepi32_ph(A, R) \ + ((__m256h)__builtin_ia32_vcvtdq2ph512_mask((__v16si)(A), \ + (__v16hf)_mm256_undefined_ph(), \ + (__mmask16)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundepi32_ph(W, U, A, R) \ + ((__m256h)__builtin_ia32_vcvtdq2ph512_mask((__v16si)(A), (__v16hf)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundepi32_ph(U, A, R) \ + ((__m256h)__builtin_ia32_vcvtdq2ph512_mask( \ + (__v16si)(A), (__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R))) + +static __inline__ __m256h __DEFAULT_FN_ATTRS512 +_mm512_cvtepi32_ph(__m512i __A) { + return (__m256h)__builtin_ia32_vcvtdq2ph512_mask( + (__v16si)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi32_ph(__m256h __W, __mmask16 __U, __m512i __A) { + return (__m256h)__builtin_ia32_vcvtdq2ph512_mask( + (__v16si)__A, (__v16hf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi32_ph(__mmask16 __U, __m512i __A) { + return (__m256h)__builtin_ia32_vcvtdq2ph512_mask( + (__v16si)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepu32_ph(A, R) \ + ((__m256h)__builtin_ia32_vcvtudq2ph512_mask((__v16su)(A), \ + (__v16hf)_mm256_undefined_ph(), \ + (__mmask16)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundepu32_ph(W, U, A, R) \ + ((__m256h)__builtin_ia32_vcvtudq2ph512_mask((__v16su)(A), (__v16hf)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundepu32_ph(U, A, R) \ + ((__m256h)__builtin_ia32_vcvtudq2ph512_mask( \ + (__v16su)(A), (__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R))) + +static __inline__ __m256h __DEFAULT_FN_ATTRS512 +_mm512_cvtepu32_ph(__m512i __A) { + return (__m256h)__builtin_ia32_vcvtudq2ph512_mask( + (__v16su)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256h 
__DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu32_ph(__m256h __W, __mmask16 __U, __m512i __A) { + return (__m256h)__builtin_ia32_vcvtudq2ph512_mask( + (__v16su)__A, (__v16hf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu32_ph(__mmask16 __U, __m512i __A) { + return (__m256h)__builtin_ia32_vcvtudq2ph512_mask( + (__v16su)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundph_epi32(A, R) \ + ((__m512i)__builtin_ia32_vcvttph2dq512_mask( \ + (__v16hf)(A), (__v16si)_mm512_undefined_epi32(), (__mmask16)(-1), \ + (int)(R))) + +#define _mm512_mask_cvtt_roundph_epi32(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2dq512_mask((__v16hf)(A), (__v16si)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundph_epi32(U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2dq512_mask((__v16hf)(A), \ + (__v16si)_mm512_setzero_epi32(), \ + (__mmask16)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttph_epi32(__m256h __A) { + return (__m512i)__builtin_ia32_vcvttph2dq512_mask( + (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttph_epi32(__m512i __W, __mmask16 __U, __m256h __A) { + return (__m512i)__builtin_ia32_vcvttph2dq512_mask( + (__v16hf)__A, (__v16si)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttph_epi32(__mmask16 __U, __m256h __A) { + return (__m512i)__builtin_ia32_vcvttph2dq512_mask( + (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundph_epu32(A, R) \ + ((__m512i)__builtin_ia32_vcvttph2udq512_mask( \ + (__v16hf)(A), (__v16su)_mm512_undefined_epi32(), (__mmask16)(-1), \ + (int)(R))) + +#define _mm512_mask_cvtt_roundph_epu32(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2udq512_mask((__v16hf)(A), (__v16su)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundph_epu32(U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2udq512_mask( \ + (__v16hf)(A), (__v16su)_mm512_setzero_epi32(), (__mmask16)(U), \ + (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttph_epu32(__m256h __A) { + return (__m512i)__builtin_ia32_vcvttph2udq512_mask( + (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttph_epu32(__m512i __W, __mmask16 __U, __m256h __A) { + return (__m512i)__builtin_ia32_vcvttph2udq512_mask( + (__v16hf)__A, (__v16su)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttph_epu32(__mmask16 __U, __m256h __A) { + return (__m512i)__builtin_ia32_vcvttph2udq512_mask( + (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepi64_ph(A, R) \ + ((__m128h)__builtin_ia32_vcvtqq2ph512_mask( \ + (__v8di)(A), (__v8hf)_mm_undefined_ph(), (__mmask8)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundepi64_ph(W, U, A, R) \ + ((__m128h)__builtin_ia32_vcvtqq2ph512_mask((__v8di)(A), (__v8hf)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundepi64_ph(U, A, R) \ + ((__m128h)__builtin_ia32_vcvtqq2ph512_mask( \ + (__v8di)(A), (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 
+_mm512_cvtepi64_ph(__m512i __A) { + return (__m128h)__builtin_ia32_vcvtqq2ph512_mask( + (__v8di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m512i __A) { + return (__m128h)__builtin_ia32_vcvtqq2ph512_mask( + (__v8di)__A, (__v8hf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepi64_ph(__mmask8 __U, __m512i __A) { + return (__m128h)__builtin_ia32_vcvtqq2ph512_mask( + (__v8di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundph_epi64(A, R) \ + ((__m512i)__builtin_ia32_vcvtph2qq512_mask((__v8hf)(A), \ + (__v8di)_mm512_undefined_epi32(), \ + (__mmask8)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundph_epi64(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2qq512_mask((__v8hf)(A), (__v8di)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundph_epi64(U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2qq512_mask( \ + (__v8hf)(A), (__v8di)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtph_epi64(__m128h __A) { + return (__m512i)__builtin_ia32_vcvtph2qq512_mask( + (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtph_epi64(__m512i __W, __mmask8 __U, __m128h __A) { + return (__m512i)__builtin_ia32_vcvtph2qq512_mask( + (__v8hf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) { + return (__m512i)__builtin_ia32_vcvtph2qq512_mask( + (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundepu64_ph(A, R) \ + ((__m128h)__builtin_ia32_vcvtuqq2ph512_mask( \ + (__v8du)(A), (__v8hf)_mm_undefined_ph(), (__mmask8)(-1), (int)(R))) + +#define _mm512_mask_cvt_roundepu64_ph(W, U, A, R) \ + ((__m128h)__builtin_ia32_vcvtuqq2ph512_mask((__v8du)(A), (__v8hf)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundepu64_ph(U, A, R) \ + ((__m128h)__builtin_ia32_vcvtuqq2ph512_mask( \ + (__v8du)(A), (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 +_mm512_cvtepu64_ph(__m512i __A) { + return (__m128h)__builtin_ia32_vcvtuqq2ph512_mask( + (__v8du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m512i __A) { + return (__m128h)__builtin_ia32_vcvtuqq2ph512_mask( + (__v8du)__A, (__v8hf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtepu64_ph(__mmask8 __U, __m512i __A) { + return (__m128h)__builtin_ia32_vcvtuqq2ph512_mask( + (__v8du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvt_roundph_epu64(A, R) \ + ((__m512i)__builtin_ia32_vcvtph2uqq512_mask( \ + (__v8hf)(A), (__v8du)_mm512_undefined_epi32(), (__mmask8)(-1), \ + (int)(R))) + +#define _mm512_mask_cvt_roundph_epu64(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2uqq512_mask((__v8hf)(A), (__v8du)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvt_roundph_epu64(U, A, R) \ + ((__m512i)__builtin_ia32_vcvtph2uqq512_mask( \ + (__v8hf)(A), 
(__v8du)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvtph_epu64(__m128h __A) { + return (__m512i)__builtin_ia32_vcvtph2uqq512_mask( + (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtph_epu64(__m512i __W, __mmask8 __U, __m128h __A) { + return (__m512i)__builtin_ia32_vcvtph2uqq512_mask( + (__v8hf)__A, (__v8du)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) { + return (__m512i)__builtin_ia32_vcvtph2uqq512_mask( + (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundph_epi64(A, R) \ + ((__m512i)__builtin_ia32_vcvttph2qq512_mask( \ + (__v8hf)(A), (__v8di)_mm512_undefined_epi32(), (__mmask8)(-1), \ + (int)(R))) + +#define _mm512_mask_cvtt_roundph_epi64(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2qq512_mask((__v8hf)(A), (__v8di)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundph_epi64(U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2qq512_mask( \ + (__v8hf)(A), (__v8di)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttph_epi64(__m128h __A) { + return (__m512i)__builtin_ia32_vcvttph2qq512_mask( + (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttph_epi64(__m512i __W, __mmask8 __U, __m128h __A) { + return (__m512i)__builtin_ia32_vcvttph2qq512_mask( + (__v8hf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) { + return (__m512i)__builtin_ia32_vcvttph2qq512_mask( + (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtt_roundph_epu64(A, R) \ + ((__m512i)__builtin_ia32_vcvttph2uqq512_mask( \ + (__v8hf)(A), (__v8du)_mm512_undefined_epi32(), (__mmask8)(-1), \ + (int)(R))) + +#define _mm512_mask_cvtt_roundph_epu64(W, U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2uqq512_mask((__v8hf)(A), (__v8du)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm512_maskz_cvtt_roundph_epu64(U, A, R) \ + ((__m512i)__builtin_ia32_vcvttph2uqq512_mask( \ + (__v8hf)(A), (__v8du)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_cvttph_epu64(__m128h __A) { + return (__m512i)__builtin_ia32_vcvttph2uqq512_mask( + (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_mask_cvttph_epu64(__m512i __W, __mmask8 __U, __m128h __A) { + return (__m512i)__builtin_ia32_vcvttph2uqq512_mask( + (__v8hf)__A, (__v8du)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) { + return (__m512i)__builtin_ia32_vcvttph2uqq512_mask( + (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_cvt_roundsh_i32(A, R) \ + ((int)__builtin_ia32_vcvtsh2si32((__v8hf)(A), (int)(R))) + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_cvtsh_i32(__m128h __A) { + return (int)__builtin_ia32_vcvtsh2si32((__v8hf)__A, _MM_FROUND_CUR_DIRECTION); +} + +#define 
_mm_cvt_roundsh_u32(A, R) \ + ((unsigned int)__builtin_ia32_vcvtsh2usi32((__v8hf)(A), (int)(R))) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS128 +_mm_cvtsh_u32(__m128h __A) { + return (unsigned int)__builtin_ia32_vcvtsh2usi32((__v8hf)__A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvt_roundsh_i64(A, R) \ + ((long long)__builtin_ia32_vcvtsh2si64((__v8hf)(A), (int)(R))) + +static __inline__ long long __DEFAULT_FN_ATTRS128 _mm_cvtsh_i64(__m128h __A) { + return (long long)__builtin_ia32_vcvtsh2si64((__v8hf)__A, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_cvt_roundsh_u64(A, R) \ + ((unsigned long long)__builtin_ia32_vcvtsh2usi64((__v8hf)(A), (int)(R))) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvtsh_u64(__m128h __A) { + return (unsigned long long)__builtin_ia32_vcvtsh2usi64( + (__v8hf)__A, _MM_FROUND_CUR_DIRECTION); +} +#endif // __x86_64__ + +#define _mm_cvt_roundu32_sh(A, B, R) \ + ((__m128h)__builtin_ia32_vcvtusi2sh((__v8hf)(A), (unsigned int)(B), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_cvtu32_sh(__m128h __A, unsigned int __B) { + __A[0] = __B; + return __A; +} + +#ifdef __x86_64__ +#define _mm_cvt_roundu64_sh(A, B, R) \ + ((__m128h)__builtin_ia32_vcvtusi642sh((__v8hf)(A), (unsigned long long)(B), \ + (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_cvtu64_sh(__m128h __A, unsigned long long __B) { + __A[0] = __B; + return __A; +} +#endif + +#define _mm_cvt_roundi32_sh(A, B, R) \ + ((__m128h)__builtin_ia32_vcvtsi2sh((__v8hf)(A), (int)(B), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvti32_sh(__m128h __A, + int __B) { + __A[0] = __B; + return __A; +} + +#ifdef __x86_64__ +#define _mm_cvt_roundi64_sh(A, B, R) \ + ((__m128h)__builtin_ia32_vcvtsi642sh((__v8hf)(A), (long long)(B), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvti64_sh(__m128h __A, + long long __B) { + __A[0] = __B; + return __A; +} +#endif + +#define _mm_cvtt_roundsh_i32(A, R) \ + ((int)__builtin_ia32_vcvttsh2si32((__v8hf)(A), (int)(R))) + +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_cvttsh_i32(__m128h __A) { + return (int)__builtin_ia32_vcvttsh2si32((__v8hf)__A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundsh_i64(A, R) \ + ((long long)__builtin_ia32_vcvttsh2si64((__v8hf)(A), (int)(R))) + +static __inline__ long long __DEFAULT_FN_ATTRS128 _mm_cvttsh_i64(__m128h __A) { + return (long long)__builtin_ia32_vcvttsh2si64((__v8hf)__A, + _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm_cvtt_roundsh_u32(A, R) \ + ((unsigned int)__builtin_ia32_vcvttsh2usi32((__v8hf)(A), (int)(R))) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS128 +_mm_cvttsh_u32(__m128h __A) { + return (unsigned int)__builtin_ia32_vcvttsh2usi32((__v8hf)__A, + _MM_FROUND_CUR_DIRECTION); +} + +#ifdef __x86_64__ +#define _mm_cvtt_roundsh_u64(A, R) \ + ((unsigned long long)__builtin_ia32_vcvttsh2usi64((__v8hf)(A), (int)(R))) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS128 +_mm_cvttsh_u64(__m128h __A) { + return (unsigned long long)__builtin_ia32_vcvttsh2usi64( + (__v8hf)__A, _MM_FROUND_CUR_DIRECTION); +} +#endif + +#define _mm512_cvtx_roundph_ps(A, R) \ + ((__m512)__builtin_ia32_vcvtph2psx512_mask((__v16hf)(A), \ + (__v16sf)_mm512_undefined_ps(), \ + (__mmask16)(-1), (int)(R))) + +#define _mm512_mask_cvtx_roundph_ps(W, U, A, R) \ + ((__m512)__builtin_ia32_vcvtph2psx512_mask((__v16hf)(A), (__v16sf)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvtx_roundph_ps(U, 
A, R) \ + ((__m512)__builtin_ia32_vcvtph2psx512_mask( \ + (__v16hf)(A), (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), (int)(R))) + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtxph_ps(__m256h __A) { + return (__m512)__builtin_ia32_vcvtph2psx512_mask( + (__v16hf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtxph_ps(__m512 __W, __mmask16 __U, __m256h __A) { + return (__m512)__builtin_ia32_vcvtph2psx512_mask( + (__v16hf)__A, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512 __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtxph_ps(__mmask16 __U, __m256h __A) { + return (__m512)__builtin_ia32_vcvtph2psx512_mask( + (__v16hf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_cvtx_roundps_ph(A, R) \ + ((__m256h)__builtin_ia32_vcvtps2phx512_mask((__v16sf)(A), \ + (__v16hf)_mm256_undefined_ph(), \ + (__mmask16)(-1), (int)(R))) + +#define _mm512_mask_cvtx_roundps_ph(W, U, A, R) \ + ((__m256h)__builtin_ia32_vcvtps2phx512_mask((__v16sf)(A), (__v16hf)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_cvtx_roundps_ph(U, A, R) \ + ((__m256h)__builtin_ia32_vcvtps2phx512_mask( \ + (__v16sf)(A), (__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R))) + +static __inline__ __m256h __DEFAULT_FN_ATTRS512 _mm512_cvtxps_ph(__m512 __A) { + return (__m256h)__builtin_ia32_vcvtps2phx512_mask( + (__v16sf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS512 +_mm512_mask_cvtxps_ph(__m256h __W, __mmask16 __U, __m512 __A) { + return (__m256h)__builtin_ia32_vcvtps2phx512_mask( + (__v16sf)__A, (__v16hf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS512 +_mm512_maskz_cvtxps_ph(__mmask16 __U, __m512 __A) { + return (__m256h)__builtin_ia32_vcvtps2phx512_mask( + (__v16sf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmadd_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_fmadd_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_mask3_fmadd_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask3( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fmadd_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_maskz( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_fmsub_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_fmsub_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fmsub_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_maskz( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_fnmadd_round_ph(A, B, C, R) \ + 
((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask3_fnmadd_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask3( \ + -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fnmadd_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_maskz( \ + -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_fnmsub_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_maskz_fnmsub_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_maskz( \ + -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmadd_ph(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmaddph512_mask3((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_maskz((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmsub_ph(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B, + -(__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B, + -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_maskz( + (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fnmadd_ph(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B, + (__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmaddph512_mask3(-(__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return 
(__m512h)__builtin_ia32_vfmaddph512_maskz(-(__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fnmsub_ph(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B, + -(__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_maskz( + -(__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmaddsub_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_fmaddsub_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_mask3_fmaddsub_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask3( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fmaddsub_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_maskz( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_fmsubadd_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_fmsubadd_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fmsubadd_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_maskz( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fmaddsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask3( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmaddsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_maskz( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask( + (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 
+_mm512_mask_fmsubadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask( + (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsubadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_maskz( + (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsub_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmsubph512_mask3( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmsubph512_mask3((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsubadd_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmsubaddph512_mask3( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmsubaddph512_mask3( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmadd_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmsub_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_mask3_fnmsub_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmsubph512_mask3( \ + -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B, + -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmsubph512_mask3(-(__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_sh(__m128h __W, + __m128h __A, + __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, (__v8hf)__B, + (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, (__v8hf)__B, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmadd_round_sh(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), 
(__v8hf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fmadd_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, (__v8hf)__B, (__v8hf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmadd_round_sh(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + return __builtin_ia32_vfmaddsh3_mask3((__v8hf)__W, (__v8hf)__X, (__v8hf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmadd_round_sh(W, X, Y, U, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask3( \ + (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_sh(__m128h __W, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, + -(__v8hf)__B, (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, + -(__v8hf)__B, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmsub_round_sh(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fmsub_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, (__v8hf)__B, + -(__v8hf)__C, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_sh(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \ + (__mmask8)(U), (int)R)) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + return __builtin_ia32_vfmsubsh3_mask3((__v8hf)__W, (__v8hf)__X, (__v8hf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_sh(W, X, Y, U, R) \ + ((__m128h)__builtin_ia32_vfmsubsh3_mask3( \ + (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_sh(__m128h __W, + __m128h __A, + __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, (__v8hf)__B, + (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, (__v8hf)__B, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmadd_round_sh(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(A), 
-(__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fnmadd_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmadd_round_sh(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \ + (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + return __builtin_ia32_vfmaddsh3_mask3((__v8hf)__W, -(__v8hf)__X, (__v8hf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmadd_round_sh(W, X, Y, U, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask3( \ + (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_sh(__m128h __W, + __m128h __A, + __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, -(__v8hf)__B, + (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, -(__v8hf)__B, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmsub_round_sh(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fnmsub_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmsub_round_sh(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \ + (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + return __builtin_ia32_vfmsubsh3_mask3((__v8hf)__W, -(__v8hf)__X, (__v8hf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmsub_round_sh(W, X, Y, U, R) \ + ((__m128h)__builtin_ia32_vfmsubsh3_mask3( \ + (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmadd_sch(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfcmaddcsh_mask((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C, (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fcmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_vfcmaddcsh_round_mask( + (__v4sf)__A, (__v4sf)(__B), (__v4sf)(__C), __U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 
+_mm_maskz_fcmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_vfcmaddcsh_maskz((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fcmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_vfcmaddcsh_round_mask3( + (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fcmadd_round_sch(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfcmaddcsh_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fcmadd_round_sch(A, U, B, C, R) \ + ((__m128h)__builtin_ia32_vfcmaddcsh_round_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_fcmadd_round_sch(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfcmaddcsh_maskz( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +#define _mm_mask3_fcmadd_round_sch(A, B, C, U, R) \ + ((__m128h)__builtin_ia32_vfcmaddcsh_round_mask3( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_sch(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddcsh_mask((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C, (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddcsh_round_mask( + (__v4sf)__A, (__v4sf)(__B), (__v4sf)(__C), __U, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddcsh_maskz((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_vfmaddcsh_round_mask3( + (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmadd_round_sch(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddcsh_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fmadd_round_sch(A, U, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddcsh_round_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_fmadd_round_sch(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddcsh_maskz( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +#define _mm_mask3_fmadd_round_sch(A, B, C, U, R) \ + ((__m128h)__builtin_ia32_vfmaddcsh_round_mask3( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmul_sch(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfcmulcsh_mask( + (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fcmul_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_vfcmulcsh_mask((__v4sf)__A, (__v4sf)__B, + 
(__v4sf)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fcmul_sch(__mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_vfcmulcsh_mask( + (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fcmul_round_sch(A, B, R) \ + ((__m128h)__builtin_ia32_vfcmulcsh_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), \ + (__v4sf)(__m128h)_mm_undefined_ph(), (__mmask8)-1, (int)(R))) + +#define _mm_mask_fcmul_round_sch(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfcmulcsh_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_fcmul_round_sch(U, A, B, R) \ + ((__m128h)__builtin_ia32_vfcmulcsh_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), \ + (__v4sf)(__m128h)_mm_setzero_ph(), (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmul_sch(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfmulcsh_mask( + (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmul_sch(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfmulcsh_mask((__v4sf)__A, (__v4sf)__B, + (__v4sf)__W, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmul_sch(__mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_vfmulcsh_mask( + (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmul_round_sch(A, B, R) \ + ((__m128h)__builtin_ia32_vfmulcsh_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), \ + (__v4sf)(__m128h)_mm_undefined_ph(), (__mmask8)-1, (int)(R))) + +#define _mm_mask_fmul_round_sch(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmulcsh_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(W), \ + (__mmask8)(U), (int)(R))) + +#define _mm_maskz_fmul_round_sch(U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmulcsh_mask( \ + (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), \ + (__v4sf)(__m128h)_mm_setzero_ph(), (__mmask8)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fcmul_pch(__m512h __A, + __m512h __B) { + return (__m512h)__builtin_ia32_vfcmulcph512_mask( + (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ph(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fcmul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_vfcmulcph512_mask((__v16sf)__A, (__v16sf)__B, + (__v16sf)__W, (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fcmul_pch(__mmask16 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_vfcmulcph512_mask( + (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ph(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fcmul_round_pch(A, B, R) \ + ((__m512h)__builtin_ia32_vfcmulcph512_mask( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \ + (__v16sf)(__m512h)_mm512_undefined_ph(), (__mmask16)-1, (int)(R))) + +#define _mm512_mask_fcmul_round_pch(W, U, A, B, R) \ + ((__m512h)__builtin_ia32_vfcmulcph512_mask( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(W), \ + (__mmask16)(U), (int)(R))) + +#define 
_mm512_maskz_fcmul_round_pch(U, A, B, R) \ + ((__m512h)__builtin_ia32_vfcmulcph512_mask( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \ + (__v16sf)(__m512h)_mm512_setzero_ph(), (__mmask16)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmul_pch(__m512h __A, + __m512h __B) { + return (__m512h)__builtin_ia32_vfmulcph512_mask( + (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ph(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fmul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_vfmulcph512_mask((__v16sf)__A, (__v16sf)__B, + (__v16sf)__W, (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmul_pch(__mmask16 __U, __m512h __A, __m512h __B) { + return (__m512h)__builtin_ia32_vfmulcph512_mask( + (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ph(), (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmul_round_pch(A, B, R) \ + ((__m512h)__builtin_ia32_vfmulcph512_mask( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \ + (__v16sf)(__m512h)_mm512_undefined_ph(), (__mmask16)-1, (int)(R))) + +#define _mm512_mask_fmul_round_pch(W, U, A, B, R) \ + ((__m512h)__builtin_ia32_vfmulcph512_mask( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(W), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_fmul_round_pch(U, A, B, R) \ + ((__m512h)__builtin_ia32_vfmulcph512_mask( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \ + (__v16sf)(__m512h)_mm512_setzero_ph(), (__mmask16)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fcmadd_pch(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfcmaddcph512_mask3( + (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fcmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfcmaddcph512_mask( + (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fcmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) { + return (__m512h)__builtin_ia32_vfcmaddcph512_mask3( + (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fcmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfcmaddcph512_maskz( + (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fcmadd_round_pch(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfcmaddcph512_mask3( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_fcmadd_round_pch(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfcmaddcph512_mask( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_mask3_fcmadd_round_pch(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfcmaddcph512_mask3( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_fcmadd_round_pch(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfcmaddcph512_maskz( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \ + (__mmask16)(U), 
(int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmadd_pch(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddcph512_mask3((__v16sf)__A, (__v16sf)__B, + (__v16sf)__C, (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddcph512_mask((__v16sf)__A, (__v16sf)__B, + (__v16sf)__C, (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) { + return (__m512h)__builtin_ia32_vfmaddcph512_mask3( + (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddcph512_maskz( + (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmadd_round_pch(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddcph512_mask3( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \ + (__mmask16)-1, (int)(R))) + +#define _mm512_mask_fmadd_round_pch(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddcph512_mask( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_mask3_fmadd_round_pch(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmaddcph512_mask3( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \ + (__mmask16)(U), (int)(R))) + +#define _mm512_maskz_fmadd_round_pch(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddcph512_maskz( \ + (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \ + (__mmask16)(U), (int)(R))) + +static __inline__ _Float16 __DEFAULT_FN_ATTRS512 +_mm512_reduce_add_ph(__m512h __W) { + return __builtin_ia32_reduce_fadd_ph512(-0.0f16, __W); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS512 +_mm512_reduce_mul_ph(__m512h __W) { + return __builtin_ia32_reduce_fmul_ph512(1.0f16, __W); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS512 +_mm512_reduce_max_ph(__m512h __V) { + return __builtin_ia32_reduce_fmax_ph512(__V); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS512 +_mm512_reduce_min_ph(__m512h __V) { + return __builtin_ia32_reduce_fmin_ph512(__V); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_blend_ph(__mmask32 __U, __m512h __A, __m512h __W) { + return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U, (__v32hf)__W, + (__v32hf)__A); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_permutex2var_ph(__m512h __A, __m512i __I, __m512h __B) { + return (__m512h)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I, + (__v32hi)__B); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_permutexvar_ph(__m512i __A, __m512h __B) { + return (__m512h)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A); +} + +// intrinsics below are alias for f*mul_*ch +#define _mm512_mul_pch(A, B) _mm512_fmul_pch(A, B) +#define _mm512_mask_mul_pch(W, U, A, B) _mm512_mask_fmul_pch(W, U, A, B) +#define _mm512_maskz_mul_pch(U, A, B) _mm512_maskz_fmul_pch(U, A, B) +#define _mm512_mul_round_pch(A, B, R) _mm512_fmul_round_pch(A, B, R) +#define _mm512_mask_mul_round_pch(W, U, A, B, R) \ + _mm512_mask_fmul_round_pch(W, U, A, B, R) +#define _mm512_maskz_mul_round_pch(U, A, B, R) \ + 
_mm512_maskz_fmul_round_pch(U, A, B, R) + +#define _mm512_cmul_pch(A, B) _mm512_fcmul_pch(A, B) +#define _mm512_mask_cmul_pch(W, U, A, B) _mm512_mask_fcmul_pch(W, U, A, B) +#define _mm512_maskz_cmul_pch(U, A, B) _mm512_maskz_fcmul_pch(U, A, B) +#define _mm512_cmul_round_pch(A, B, R) _mm512_fcmul_round_pch(A, B, R) +#define _mm512_mask_cmul_round_pch(W, U, A, B, R) \ + _mm512_mask_fcmul_round_pch(W, U, A, B, R) +#define _mm512_maskz_cmul_round_pch(U, A, B, R) \ + _mm512_maskz_fcmul_round_pch(U, A, B, R) + +#define _mm_mul_sch(A, B) _mm_fmul_sch(A, B) +#define _mm_mask_mul_sch(W, U, A, B) _mm_mask_fmul_sch(W, U, A, B) +#define _mm_maskz_mul_sch(U, A, B) _mm_maskz_fmul_sch(U, A, B) +#define _mm_mul_round_sch(A, B, R) _mm_fmul_round_sch(A, B, R) +#define _mm_mask_mul_round_sch(W, U, A, B, R) \ + _mm_mask_fmul_round_sch(W, U, A, B, R) +#define _mm_maskz_mul_round_sch(U, A, B, R) _mm_maskz_fmul_round_sch(U, A, B, R) + +#define _mm_cmul_sch(A, B) _mm_fcmul_sch(A, B) +#define _mm_mask_cmul_sch(W, U, A, B) _mm_mask_fcmul_sch(W, U, A, B) +#define _mm_maskz_cmul_sch(U, A, B) _mm_maskz_fcmul_sch(U, A, B) +#define _mm_cmul_round_sch(A, B, R) _mm_fcmul_round_sch(A, B, R) +#define _mm_mask_cmul_round_sch(W, U, A, B, R) \ + _mm_mask_fcmul_round_sch(W, U, A, B, R) +#define _mm_maskz_cmul_round_sch(U, A, B, R) \ + _mm_maskz_fcmul_round_sch(U, A, B, R) + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 +#undef __DEFAULT_FN_ATTRS512 + +#endif +#endif diff --git a/third_party/intel/clang/avx512ifmaintrin.h b/third_party/intel/clang/avx512ifmaintrin.h new file mode 100644 index 000000000..9468d1755 --- /dev/null +++ b/third_party/intel/clang/avx512ifmaintrin.h @@ -0,0 +1,70 @@ +/*===------------- avx512ifmaintrin.h - IFMA intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __IFMAINTRIN_H +#define __IFMAINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512ifma,evex512"), __min_vector_width__(512))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_madd52hi_epu64 (__m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i)__builtin_ia32_vpmadd52huq512((__v8di) __X, (__v8di) __Y, + (__v8di) __Z); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_madd52hi_epu64 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_madd52hi_epu64(__W, __X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_madd52hi_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_madd52hi_epu64(__X, __Y, __Z), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_madd52lo_epu64 (__m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i)__builtin_ia32_vpmadd52luq512((__v8di) __X, (__v8di) __Y, + (__v8di) __Z); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_madd52lo_epu64 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_madd52lo_epu64(__W, __X, __Y), + (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_madd52lo_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) +{ + return (__m512i)__builtin_ia32_selectq_512(__M, + (__v8di)_mm512_madd52lo_epu64(__X, __Y, __Z), + (__v8di)_mm512_setzero_si512()); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/avx512ifmavlintrin.h b/third_party/intel/clang/avx512ifmavlintrin.h new file mode 100644 index 000000000..8787cd471 --- /dev/null +++ b/third_party/intel/clang/avx512ifmavlintrin.h @@ -0,0 +1,111 @@ +/*===------------- avx512ifmavlintrin.h - IFMA intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __IFMAVLINTRIN_H +#define __IFMAVLINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512ifma,avx512vl,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512ifma,avx512vl,no-evex512"), \ + __min_vector_width__(256))) + +#define _mm_madd52hi_epu64(X, Y, Z) \ + ((__m128i)__builtin_ia32_vpmadd52huq128((__v2di)(X), (__v2di)(Y), \ + (__v2di)(Z))) + +#define _mm256_madd52hi_epu64(X, Y, Z) \ + ((__m256i)__builtin_ia32_vpmadd52huq256((__v4di)(X), (__v4di)(Y), \ + (__v4di)(Z))) + +#define _mm_madd52lo_epu64(X, Y, Z) \ + ((__m128i)__builtin_ia32_vpmadd52luq128((__v2di)(X), (__v2di)(Y), \ + (__v2di)(Z))) + +#define _mm256_madd52lo_epu64(X, Y, Z) \ + ((__m256i)__builtin_ia32_vpmadd52luq256((__v4di)(X), (__v4di)(Y), \ + (__v4di)(Z))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di)_mm_madd52hi_epu64(__W, __X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di)_mm_madd52hi_epu64(__X, __Y, __Z), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di)_mm256_madd52hi_epu64(__W, __X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di)_mm256_madd52hi_epu64(__X, __Y, __Z), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di)_mm_madd52lo_epu64(__W, __X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di)_mm_madd52lo_epu64(__X, __Y, __Z), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di)_mm256_madd52lo_epu64(__W, __X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_madd52lo_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di)_mm256_madd52lo_epu64(__X, __Y, __Z), + (__v4di)_mm256_setzero_si256()); +} + + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/intel/clang/avx512pfintrin.h b/third_party/intel/clang/avx512pfintrin.h new file mode 100644 index 000000000..f853be021 --- /dev/null +++ b/third_party/intel/clang/avx512pfintrin.h @@ -0,0 +1,92 @@ +/*===------------- avx512pfintrin.h - PF intrinsics ------------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512PFINTRIN_H +#define __AVX512PFINTRIN_H + +#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) \ + __builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ + (void const *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) \ + __builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \ + (void const *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) \ + __builtin_ia32_gatherpfdps((__mmask16)(mask), \ + (__v16si)(__m512i)(index), (void const *)(addr), \ + (int)(scale), (int)(hint)) + +#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) \ + __builtin_ia32_gatherpfdps((__mmask16) -1, \ + (__v16si)(__m512i)(index), (void const *)(addr), \ + (int)(scale), (int)(hint)) + +#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) \ + __builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (void const *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) \ + __builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \ + (void const *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) \ + __builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (void const *)(addr), (int)(scale), (int)(hint)) + +#define _mm512_prefetch_i64gather_ps(index, addr, scale, hint) \ + __builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \ + (void const *)(addr), (int)(scale), (int)(hint)) + +#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) \ + __builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \ + (void *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) \ + __builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ + (void *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) \ + __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \ + (void *)(addr), (int)(scale), (int)(hint)) + +#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) \ + __builtin_ia32_scatterpfdps((__mmask16)(mask), \ + (__v16si)(__m512i)(index), (void *)(addr), \ + (int)(scale), (int)(hint)) + +#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) \ + __builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \ + (void *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) \ + __builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (void *)(addr), (int)(scale), \ + (int)(hint)) + +#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) \ + __builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \ + (void *)(addr), (int)(scale), (int)(hint)) + +#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) \ + __builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ + (void *)(addr), (int)(scale), (int)(hint)) + +#endif diff --git a/third_party/intel/clang/avx512vbmi2intrin.h 
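/*
 * Illustrative aside, not part of the vendored LLVM headers: the AVX-512 IFMA
 * intrinsics added above (_mm512_madd52lo_epu64 / _mm512_madd52hi_epu64 and
 * their 128/256-bit VL forms) multiply the low 52 bits of each 64-bit lane
 * and accumulate either the low or the high half of the 104-bit product.
 * A minimal sketch of one 52-bit limb multiply-accumulate step, assuming the
 * translation unit includes immintrin.h and is built with -mavx512ifma (the
 * function and variable names here are hypothetical):
 */
static inline void madd52_limb_step(__m512i *acc_lo, __m512i *acc_hi,
                                    __m512i a, __m512i b) {
  /* Per 64-bit lane: acc_lo += low 52 bits of a*b, acc_hi += high 52 bits. */
  *acc_lo = _mm512_madd52lo_epu64(*acc_lo, a, b);
  *acc_hi = _mm512_madd52hi_epu64(*acc_hi, a, b);
}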
b/third_party/intel/clang/avx512vbmi2intrin.h new file mode 100644 index 000000000..11598c888 --- /dev/null +++ b/third_party/intel/clang/avx512vbmi2intrin.h @@ -0,0 +1,357 @@ +/*===------------- avx512vbmi2intrin.h - VBMI2 intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VBMI2INTRIN_H +#define __AVX512VBMI2INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi2,evex512"), __min_vector_width__(512))) + + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_compress_epi16(__m512i __S, __mmask32 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_compresshi512_mask ((__v32hi) __D, + (__v32hi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_compress_epi16(__mmask32 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_compresshi512_mask ((__v32hi) __D, + (__v32hi) _mm512_setzero_si512(), + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_compress_epi8(__m512i __S, __mmask64 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_compressqi512_mask ((__v64qi) __D, + (__v64qi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_compress_epi8(__mmask64 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_compressqi512_mask ((__v64qi) __D, + (__v64qi) _mm512_setzero_si512(), + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_compressstoreu_epi16(void *__P, __mmask32 __U, __m512i __D) +{ + __builtin_ia32_compressstorehi512_mask ((__v32hi *) __P, (__v32hi) __D, + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_mask_compressstoreu_epi8(void *__P, __mmask64 __U, __m512i __D) +{ + __builtin_ia32_compressstoreqi512_mask ((__v64qi *) __P, (__v64qi) __D, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expand_epi16(__m512i __S, __mmask32 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_expandhi512_mask ((__v32hi) __D, + (__v32hi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expand_epi16(__mmask32 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_expandhi512_mask ((__v32hi) __D, + (__v32hi) _mm512_setzero_si512(), + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expand_epi8(__m512i __S, __mmask64 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_expandqi512_mask ((__v64qi) __D, + (__v64qi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expand_epi8(__mmask64 __U, __m512i __D) +{ + return (__m512i) __builtin_ia32_expandqi512_mask ((__v64qi) __D, + (__v64qi) _mm512_setzero_si512(), + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expandloadu_epi16(__m512i __S, __mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadhi512_mask ((const __v32hi *)__P, + (__v32hi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expandloadu_epi16(__mmask32 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadhi512_mask ((const __v32hi *)__P, + (__v32hi) _mm512_setzero_si512(), + __U); +} + +static 
__inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_expandloadu_epi8(__m512i __S, __mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadqi512_mask ((const __v64qi *)__P, + (__v64qi) __S, + __U); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_expandloadu_epi8(__mmask64 __U, void const *__P) +{ + return (__m512i) __builtin_ia32_expandloadqi512_mask ((const __v64qi *)__P, + (__v64qi) _mm512_setzero_si512(), + __U); +} + +#define _mm512_shldi_epi64(A, B, I) \ + ((__m512i)__builtin_ia32_vpshldq512((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(I))) + +#define _mm512_mask_shldi_epi64(S, U, A, B, I) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shldi_epi64((A), (B), (I)), \ + (__v8di)(__m512i)(S))) + +#define _mm512_maskz_shldi_epi64(U, A, B, I) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shldi_epi64((A), (B), (I)), \ + (__v8di)_mm512_setzero_si512())) + +#define _mm512_shldi_epi32(A, B, I) \ + ((__m512i)__builtin_ia32_vpshldd512((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(I))) + +#define _mm512_mask_shldi_epi32(S, U, A, B, I) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shldi_epi32((A), (B), (I)), \ + (__v16si)(__m512i)(S))) + +#define _mm512_maskz_shldi_epi32(U, A, B, I) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shldi_epi32((A), (B), (I)), \ + (__v16si)_mm512_setzero_si512())) + +#define _mm512_shldi_epi16(A, B, I) \ + ((__m512i)__builtin_ia32_vpshldw512((__v32hi)(__m512i)(A), \ + (__v32hi)(__m512i)(B), (int)(I))) + +#define _mm512_mask_shldi_epi16(S, U, A, B, I) \ + ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \ + (__v32hi)(__m512i)(S))) + +#define _mm512_maskz_shldi_epi16(U, A, B, I) \ + ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \ + (__v32hi)_mm512_setzero_si512())) + +#define _mm512_shrdi_epi64(A, B, I) \ + ((__m512i)__builtin_ia32_vpshrdq512((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), (int)(I))) + +#define _mm512_mask_shrdi_epi64(S, U, A, B, I) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \ + (__v8di)(__m512i)(S))) + +#define _mm512_maskz_shrdi_epi64(U, A, B, I) \ + ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \ + (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \ + (__v8di)_mm512_setzero_si512())) + +#define _mm512_shrdi_epi32(A, B, I) \ + ((__m512i)__builtin_ia32_vpshrdd512((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), (int)(I))) + +#define _mm512_mask_shrdi_epi32(S, U, A, B, I) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \ + (__v16si)(__m512i)(S))) + +#define _mm512_maskz_shrdi_epi32(U, A, B, I) \ + ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \ + (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \ + (__v16si)_mm512_setzero_si512())) + +#define _mm512_shrdi_epi16(A, B, I) \ + ((__m512i)__builtin_ia32_vpshrdw512((__v32hi)(__m512i)(A), \ + (__v32hi)(__m512i)(B), (int)(I))) + +#define _mm512_mask_shrdi_epi16(S, U, A, B, I) \ + ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \ + (__v32hi)(__m512i)(S))) + +#define _mm512_maskz_shrdi_epi16(U, A, B, I) \ + ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \ + (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \ + (__v32hi)_mm512_setzero_si512())) + +static __inline__ 
__m512i __DEFAULT_FN_ATTRS +_mm512_shldv_epi64(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshldvq512((__v8di)__A, (__v8di)__B, + (__v8di)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shldv_epi64(__m512i __A, __mmask8 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_shldv_epi64(__A, __B, __C), + (__v8di)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shldv_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_shldv_epi64(__A, __B, __C), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shldv_epi32(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshldvd512((__v16si)__A, (__v16si)__B, + (__v16si)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shldv_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_shldv_epi32(__A, __B, __C), + (__v16si)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shldv_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_shldv_epi32(__A, __B, __C), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shldv_epi16(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshldvw512((__v32hi)__A, (__v32hi)__B, + (__v32hi)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shldv_epi16(__m512i __A, __mmask32 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_shldv_epi16(__A, __B, __C), + (__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shldv_epi16(__mmask32 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_shldv_epi16(__A, __B, __C), + (__v32hi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shrdv_epi64(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshrdvq512((__v8di)__A, (__v8di)__B, + (__v8di)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shrdv_epi64(__m512i __A, __mmask8 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_shrdv_epi64(__A, __B, __C), + (__v8di)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shrdv_epi64(__mmask8 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectq_512(__U, + (__v8di)_mm512_shrdv_epi64(__A, __B, __C), + (__v8di)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shrdv_epi32(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshrdvd512((__v16si)__A, (__v16si)__B, + (__v16si)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shrdv_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_selectd_512(__U, + (__v16si)_mm512_shrdv_epi32(__A, __B, __C), + (__v16si)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shrdv_epi32(__mmask16 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i) __builtin_ia32_selectd_512(__U, + (__v16si)_mm512_shrdv_epi32(__A, __B, __C), + (__v16si)_mm512_setzero_si512()); +} + +static 
__inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_shrdv_epi16(__m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_vpshrdvw512((__v32hi)__A, (__v32hi)__B, + (__v32hi)__C); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_shrdv_epi16(__m512i __A, __mmask32 __U, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_shrdv_epi16(__A, __B, __C), + (__v32hi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_shrdv_epi16(__mmask32 __U, __m512i __A, __m512i __B, __m512i __C) +{ + return (__m512i)__builtin_ia32_selectw_512(__U, + (__v32hi)_mm512_shrdv_epi16(__A, __B, __C), + (__v32hi)_mm512_setzero_si512()); +} + + +#undef __DEFAULT_FN_ATTRS + +#endif + diff --git a/third_party/intel/clang/avx512vbmiintrin.h b/third_party/intel/clang/avx512vbmiintrin.h new file mode 100644 index 000000000..e47cd5cad --- /dev/null +++ b/third_party/intel/clang/avx512vbmiintrin.h @@ -0,0 +1,106 @@ +/*===------------- avx512vbmiintrin.h - VBMI intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __VBMIINTRIN_H +#define __VBMIINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vbmi,evex512"), __min_vector_width__(512))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_permutex2var_epi8(__m512i __A, __m512i __I, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpermi2varqi512((__v64qi)__A, (__v64qi)__I, + (__v64qi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutex2var_epi8(__m512i __A, __mmask64 __U, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512(__U, + (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B), + (__v64qi)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask2_permutex2var_epi8(__m512i __A, __m512i __I, __mmask64 __U, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512(__U, + (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B), + (__v64qi)__I); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutex2var_epi8(__mmask64 __U, __m512i __A, __m512i __I, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512(__U, + (__v64qi)_mm512_permutex2var_epi8(__A, __I, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_permutexvar_epi8 (__m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_permvarqi512((__v64qi) __B, (__v64qi) __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_permutexvar_epi8 (__mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_permutexvar_epi8(__A, __B), + (__v64qi)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_permutexvar_epi8 (__m512i __W, __mmask64 __M, __m512i __A, + __m512i __B) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_permutexvar_epi8(__A, __B), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_multishift_epi64_epi8(__m512i __X, __m512i __Y) +{ + return 
(__m512i)__builtin_ia32_vpmultishiftqb512((__v64qi)__X, (__v64qi) __Y); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_multishift_epi64_epi8(__m512i __W, __mmask64 __M, __m512i __X, + __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_multishift_epi64_epi8(__X, __Y), + (__v64qi)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_multishift_epi64_epi8(__mmask64 __M, __m512i __X, __m512i __Y) +{ + return (__m512i)__builtin_ia32_selectb_512((__mmask64)__M, + (__v64qi)_mm512_multishift_epi64_epi8(__X, __Y), + (__v64qi)_mm512_setzero_si512()); +} + + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/avx512vbmivlintrin.h b/third_party/intel/clang/avx512vbmivlintrin.h new file mode 100644 index 000000000..848ca2d18 --- /dev/null +++ b/third_party/intel/clang/avx512vbmivlintrin.h @@ -0,0 +1,193 @@ +/*===------------- avx512vbmivlintrin.h - VBMI intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __VBMIVLINTRIN_H +#define __VBMIVLINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vbmi,avx512vl,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vbmi,avx512vl,no-evex512"), \ + __min_vector_width__(256))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_permutex2var_epi8(__m128i __A, __m128i __I, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpermi2varqi128((__v16qi)__A, + (__v16qi)__I, + (__v16qi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_permutex2var_epi8(__m128i __A, __mmask16 __U, __m128i __I, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128(__U, + (__v16qi)_mm_permutex2var_epi8(__A, __I, __B), + (__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask2_permutex2var_epi8(__m128i __A, __m128i __I, __mmask16 __U, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128(__U, + (__v16qi)_mm_permutex2var_epi8(__A, __I, __B), + (__v16qi)__I); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_permutex2var_epi8(__mmask16 __U, __m128i __A, __m128i __I, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128(__U, + (__v16qi)_mm_permutex2var_epi8(__A, __I, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutex2var_epi8(__m256i __A, __m256i __I, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpermi2varqi256((__v32qi)__A, (__v32qi)__I, + (__v32qi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutex2var_epi8(__m256i __A, __mmask32 __U, __m256i __I, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256(__U, + (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B), + (__v32qi)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask2_permutex2var_epi8(__m256i __A, __m256i __I, __mmask32 __U, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256(__U, + (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B), + 
(__v32qi)__I); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutex2var_epi8(__mmask32 __U, __m256i __A, __m256i __I, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256(__U, + (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_permutexvar_epi8 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_permvarqi128((__v16qi)__B, (__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_permutexvar_epi8 (__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_permutexvar_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_permutexvar_epi8 (__m128i __W, __mmask16 __M, __m128i __A, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_permutexvar_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutexvar_epi8 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_permvarqi256((__v32qi) __B, (__v32qi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_epi8 (__mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_permutexvar_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_permutexvar_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_multishift_epi64_epi8(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_vpmultishiftqb128((__v16qi)__X, (__v16qi)__Y); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_multishift_epi64_epi8(__m128i __W, __mmask16 __M, __m128i __X, + __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_multishift_epi64_epi8(__X, __Y), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_multishift_epi64_epi8(__mmask16 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_multishift_epi64_epi8(__X, __Y), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_multishift_epi64_epi8(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_vpmultishiftqb256((__v32qi)__X, (__v32qi)__Y); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_multishift_epi64_epi8(__m256i __W, __mmask32 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_multishift_epi64_epi8(__X, __Y), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_multishift_epi64_epi8(__mmask32 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_multishift_epi64_epi8(__X, __Y), + (__v32qi)_mm256_setzero_si256()); +} + + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/intel/clang/avx512vlbf16intrin.h b/third_party/intel/clang/avx512vlbf16intrin.h new file mode 100644 index 000000000..89c9f49c7 --- /dev/null +++ b/third_party/intel/clang/avx512vlbf16intrin.h @@ -0,0 +1,517 @@ +/*===--------- 
avx512vlbf16intrin.h - AVX512_BF16 intrinsics ---------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifdef __SSE2__ + +#ifndef __AVX512VLBF16INTRIN_H +#define __AVX512VLBF16INTRIN_H + +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512bf16,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512bf16,no-evex512"), \ + __min_vector_width__(256))) + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __B +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __B, and higher 64 bits come from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_cvtne2ps_pbh(__m128 __A, __m128 __B) { + return (__m128bh)__builtin_ia32_cvtne2ps2bf16_128((__v4sf) __A, + (__v4sf) __B); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __B +/// A 128-bit vector of [4 x float]. +/// \param __W +/// A 128-bit vector of [8 x bfloat]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element from __W. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __B, and higher 64 bits come from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U, + (__v8bf)_mm_cvtne2ps_pbh(__A, __B), + (__v8bf)__W); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __B +/// A 128-bit vector of [4 x float]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element is zero. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __B, and higher 64 bits come from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtne2ps_pbh(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U, + (__v8bf)_mm_cvtne2ps_pbh(__A, __B), + (__v8bf)_mm_setzero_si128()); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __B +/// A 256-bit vector of [8 x float]. 
+/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from +/// conversion of __B, and higher 128 bits come from conversion of __A. +static __inline__ __m256bh __DEFAULT_FN_ATTRS256 +_mm256_cvtne2ps_pbh(__m256 __A, __m256 __B) { + return (__m256bh)__builtin_ia32_cvtne2ps2bf16_256((__v8sf) __A, + (__v8sf) __B); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __B +/// A 256-bit vector of [8 x float]. +/// \param __W +/// A 256-bit vector of [16 x bfloat]. +/// \param __U +/// A 16-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element from __W. +/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from +/// conversion of __B, and higher 128 bits come from conversion of __A. +static __inline__ __m256bh __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) { + return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U, + (__v16bf)_mm256_cvtne2ps_pbh(__A, __B), + (__v16bf)__W); +} + +/// Convert Two Packed Single Data to One Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNE2PS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __B +/// A 256-bit vector of [8 x float]. +/// \param __U +/// A 16-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A or __B. A 0 means element is zero. +/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from +/// conversion of __B, and higher 128 bits come from conversion of __A. +static __inline__ __m256bh __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) { + return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U, + (__v16bf)_mm256_cvtne2ps_pbh(__A, __B), + (__v16bf)_mm256_setzero_si256()); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __A, and higher 64 bits are 0. +#define _mm_cvtneps_pbh(A) \ + ((__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)(A))) + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __W +/// A 128-bit vector of [8 x bfloat]. +/// \param __U +/// A 4-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A. A 0 means element from __W. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __A, and higher 64 bits are 0. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, + (__v8bf)__W, + (__mmask8)__U); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \param __U +/// A 4-bit mask value specifying what is chosen for each element. 
+/// A 1 means conversion of __A. A 0 means element is zero. +/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from +/// conversion of __A, and higher 64 bits are 0. +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A, + (__v8bf)_mm_setzero_si128(), + (__mmask8)__U); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \returns A 128-bit vector of [8 x bfloat] comes from conversion of __A. +#define _mm256_cvtneps_pbh(A) \ + ((__m128bh)__builtin_ia32_vcvtneps2bf16256((__v8sf)(A))) + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __W +/// A 256-bit vector of [8 x bfloat]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A. A 0 means element from __W. +/// \returns A 128-bit vector of [8 x bfloat] comes from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A, + (__v8bf)__W, + (__mmask8)__U); +} + +/// Convert Packed Single Data to Packed BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means conversion of __A. A 0 means element is zero. +/// \returns A 128-bit vector of [8 x bfloat] comes from conversion of __A. +static __inline__ __m128bh __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) { + return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A, + (__v8bf)_mm_setzero_si128(), + (__mmask8)__U); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 128-bit vector of [8 x bfloat]. +/// \param __B +/// A 128-bit vector of [8 x bfloat]. +/// \param __D +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_dpbf16_ps(__m128 __D, __m128bh __A, __m128bh __B) { + return (__m128)__builtin_ia32_dpbf16ps_128((__v4sf)__D, + (__v8bf)__A, + (__v8bf)__B); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 128-bit vector of [8 x bfloat]. +/// \param __B +/// A 128-bit vector of [8 x bfloat]. +/// \param __D +/// A 128-bit vector of [4 x float]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means __A and __B's dot product accumulated with __D. A 0 means __D. 
+/// \returns A 128-bit vector of [4 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_dpbf16_ps(__m128 __D, __mmask8 __U, __m128bh __A, __m128bh __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_dpbf16_ps(__D, __A, __B), + (__v4sf)__D); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 128-bit vector of [8 x bfloat]. +/// \param __B +/// A 128-bit vector of [8 x bfloat]. +/// \param __D +/// A 128-bit vector of [4 x float]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means __A and __B's dot product accumulated with __D. A 0 means 0. +/// \returns A 128-bit vector of [4 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_dpbf16_ps(__mmask8 __U, __m128 __D, __m128bh __A, __m128bh __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_dpbf16_ps(__D, __A, __B), + (__v4sf)_mm_setzero_si128()); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 256-bit vector of [16 x bfloat]. +/// \param __B +/// A 256-bit vector of [16 x bfloat]. +/// \param __D +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_dpbf16_ps(__m256 __D, __m256bh __A, __m256bh __B) { + return (__m256)__builtin_ia32_dpbf16ps_256((__v8sf)__D, + (__v16bf)__A, + (__v16bf)__B); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 256-bit vector of [16 x bfloat]. +/// \param __B +/// A 256-bit vector of [16 x bfloat]. +/// \param __D +/// A 256-bit vector of [8 x float]. +/// \param __U +/// A 16-bit mask value specifying what is chosen for each element. +/// A 1 means __A and __B's dot product accumulated with __D. A 0 means __D. +/// \returns A 256-bit vector of [8 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_dpbf16_ps(__m256 __D, __mmask8 __U, __m256bh __A, __m256bh __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_dpbf16_ps(__D, __A, __B), + (__v8sf)__D); +} + +/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDPBF16PS instructions. +/// +/// \param __A +/// A 256-bit vector of [16 x bfloat]. +/// \param __B +/// A 256-bit vector of [16 x bfloat]. +/// \param __D +/// A 256-bit vector of [8 x float]. +/// \param __U +/// A 8-bit mask value specifying what is chosen for each element. +/// A 1 means __A and __B's dot product accumulated with __D. A 0 means 0. 
+/// \returns A 256-bit vector of [8 x float] comes from Dot Product of +/// __A, __B and __D +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_dpbf16_ps(__mmask8 __U, __m256 __D, __m256bh __A, __m256bh __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_dpbf16_ps(__D, __A, __B), + (__v8sf)_mm256_setzero_si256()); +} + +/// Convert One Single float Data to One BF16 Data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTNEPS2BF16 instructions. +/// +/// \param __A +/// A float data. +/// \returns A bf16 data whose sign field and exponent field keep unchanged, +/// and fraction field is truncated to 7 bits. +static __inline__ __bf16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) { + __v4sf __V = {__A, 0, 0, 0}; + __v8bf __R = __builtin_ia32_cvtneps2bf16_128_mask( + (__v4sf)__V, (__v8bf)_mm_undefined_si128(), (__mmask8)-1); + return (__bf16)__R[0]; +} + +/// Convert Packed BF16 Data to Packed float Data. +/// +/// \headerfile +/// +/// \param __A +/// A 128-bit vector of [4 x bfloat]. +/// \returns A 128-bit vector of [4 x float] come from conversion of __A +static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtpbh_ps(__m128bh __A) { + return _mm_castsi128_ps( + (__m128i)_mm_slli_epi32((__m128i)_mm_cvtepi16_epi32((__m128i)__A), 16)); +} + +/// Convert Packed BF16 Data to Packed float Data. +/// +/// \headerfile +/// +/// \param __A +/// A 128-bit vector of [8 x bfloat]. +/// \returns A 256-bit vector of [8 x float] come from conversion of __A +static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) { + return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32( + (__m256i)_mm256_cvtepi16_epi32((__m128i)__A), 16)); +} + +/// Convert Packed BF16 Data to Packed float Data using zeroing mask. +/// +/// \headerfile +/// +/// \param __U +/// A 4-bit mask. Elements are zeroed out when the corresponding mask +/// bit is not set. +/// \param __A +/// A 128-bit vector of [4 x bfloat]. +/// \returns A 128-bit vector of [4 x float] come from conversion of __A +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) { + return _mm_castsi128_ps((__m128i)_mm_slli_epi32( + (__m128i)_mm_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16)); +} + +/// Convert Packed BF16 Data to Packed float Data using zeroing mask. +/// +/// \headerfile +/// +/// \param __U +/// A 8-bit mask. Elements are zeroed out when the corresponding mask +/// bit is not set. +/// \param __A +/// A 128-bit vector of [8 x bfloat]. +/// \returns A 256-bit vector of [8 x float] come from conversion of __A +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) { + return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32( + (__m256i)_mm256_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16)); +} + +/// Convert Packed BF16 Data to Packed float Data using merging mask. +/// +/// \headerfile +/// +/// \param __S +/// A 128-bit vector of [4 x float]. Elements are copied from __S when +/// the corresponding mask bit is not set. +/// \param __U +/// A 4-bit mask. Elements are zeroed out when the corresponding mask +/// bit is not set. +/// \param __A +/// A 128-bit vector of [4 x bfloat]. 
+/// \returns A 128-bit vector of [4 x float] come from conversion of __A +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpbh_ps(__m128 __S, __mmask8 __U, __m128bh __A) { + return _mm_castsi128_ps((__m128i)_mm_mask_slli_epi32( + (__m128i)__S, (__mmask8)__U, (__m128i)_mm_cvtepi16_epi32((__m128i)__A), + 16)); +} + +/// Convert Packed BF16 Data to Packed float Data using merging mask. +/// +/// \headerfile +/// +/// \param __S +/// A 256-bit vector of [8 x float]. Elements are copied from __S when +/// the corresponding mask bit is not set. +/// \param __U +/// A 8-bit mask. Elements are zeroed out when the corresponding mask +/// bit is not set. +/// \param __A +/// A 128-bit vector of [8 x bfloat]. +/// \returns A 256-bit vector of [8 x float] come from conversion of __A +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) { + return _mm256_castsi256_ps((__m256i)_mm256_mask_slli_epi32( + (__m256i)__S, (__mmask8)__U, (__m256i)_mm256_cvtepi16_epi32((__m128i)__A), + 16)); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif +#endif diff --git a/third_party/intel/clang/avx512vlbitalgintrin.h b/third_party/intel/clang/avx512vlbitalgintrin.h new file mode 100644 index 000000000..377e3a5ea --- /dev/null +++ b/third_party/intel/clang/avx512vlbitalgintrin.h @@ -0,0 +1,151 @@ +/*===---- avx512vlbitalgintrin.h - BITALG intrinsics -----------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VLBITALGINTRIN_H +#define __AVX512VLBITALGINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512bitalg,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512bitalg,no-evex512"), \ + __min_vector_width__(256))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_popcnt_epi16(__m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcntw_256((__v16hi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_popcnt_epi16(__m256i __A, __mmask16 __U, __m256i __B) +{ + return (__m256i) __builtin_ia32_selectw_256((__mmask16) __U, + (__v16hi) _mm256_popcnt_epi16(__B), + (__v16hi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_popcnt_epi16(__mmask16 __U, __m256i __B) +{ + return _mm256_mask_popcnt_epi16((__m256i) _mm256_setzero_si256(), + __U, + __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_popcnt_epi16(__m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcntw_128((__v8hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_popcnt_epi16(__m128i __A, __mmask8 __U, __m128i __B) +{ + return (__m128i) __builtin_ia32_selectw_128((__mmask8) __U, + (__v8hi) _mm_popcnt_epi16(__B), + (__v8hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_popcnt_epi16(__mmask8 __U, __m128i __B) +{ + return _mm_mask_popcnt_epi16((__m128i) _mm_setzero_si128(), + __U, + __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_popcnt_epi8(__m256i __A) +{ + return (__m256i) __builtin_ia32_vpopcntb_256((__v32qi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_popcnt_epi8(__m256i __A, __mmask32 __U, __m256i __B) +{ + return (__m256i) __builtin_ia32_selectb_256((__mmask32) __U, + (__v32qi) _mm256_popcnt_epi8(__B), + (__v32qi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_popcnt_epi8(__mmask32 __U, __m256i __B) +{ + return _mm256_mask_popcnt_epi8((__m256i) _mm256_setzero_si256(), + __U, + __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_popcnt_epi8(__m128i __A) +{ + return (__m128i) __builtin_ia32_vpopcntb_128((__v16qi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_popcnt_epi8(__m128i __A, __mmask16 __U, __m128i __B) +{ + return (__m128i) __builtin_ia32_selectb_128((__mmask16) __U, + (__v16qi) _mm_popcnt_epi8(__B), + (__v16qi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_popcnt_epi8(__mmask16 __U, __m128i __B) +{ + return _mm_mask_popcnt_epi8((__m128i) _mm_setzero_si128(), + __U, + __B); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_mask_bitshuffle_epi64_mask(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__mmask32) __builtin_ia32_vpshufbitqmb256_mask((__v32qi) __A, + (__v32qi) __B, + __U); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_bitshuffle_epi64_mask(__m256i __A, __m256i __B) +{ + return _mm256_mask_bitshuffle_epi64_mask((__mmask32) -1, + __A, + __B); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_mask_bitshuffle_epi64_mask(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__mmask16) __builtin_ia32_vpshufbitqmb128_mask((__v16qi) __A, + (__v16qi) __B, + __U); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_bitshuffle_epi64_mask(__m128i __A, __m128i __B) +{ + return _mm_mask_bitshuffle_epi64_mask((__mmask16) -1, + __A, + __B); +} + + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + 
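/*
 * Illustrative aside, not part of the vendored LLVM headers: the VL BITALG
 * intrinsics above provide per-8-bit and per-16-bit population counts plus
 * VPSHUFBITQMB bit shuffles on 128/256-bit vectors. A minimal sketch that
 * counts the set bits in a 32-byte buffer, assuming immintrin.h is included
 * and the file is built with -mavx512vl -mavx512bitalg (plus AVX2 for the
 * byte-sum reduction); the function name is hypothetical:
 */
static inline unsigned long long popcount_32_bytes(const void *p) {
  __m256i v = _mm256_loadu_si256((const __m256i *)p);   /* unaligned load    */
  __m256i c = _mm256_popcnt_epi8(v);                    /* per-byte popcount */
  /* Sum the 32 byte counts: VPSADBW against zero yields four 64-bit sums.  */
  __m256i s = _mm256_sad_epu8(c, _mm256_setzero_si256());
  return (unsigned long long)_mm256_extract_epi64(s, 0) +
         (unsigned long long)_mm256_extract_epi64(s, 1) +
         (unsigned long long)_mm256_extract_epi64(s, 2) +
         (unsigned long long)_mm256_extract_epi64(s, 3);
}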
+#endif diff --git a/third_party/intel/clang/avx512vlbwintrin.h b/third_party/intel/clang/avx512vlbwintrin.h new file mode 100644 index 000000000..9aedba066 --- /dev/null +++ b/third_party/intel/clang/avx512vlbwintrin.h @@ -0,0 +1,3167 @@ +/*===---- avx512vlbwintrin.h - AVX512VL and AVX512BW intrinsics ------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VLBWINTRIN_H +#define __AVX512VLBWINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512bw,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512bw,no-evex512"), \ + __min_vector_width__(256))) + +/* Integer compare */ + +#define _mm_cmp_epi8_mask(a, b, p) \ + ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), (int)(p), \ + (__mmask16)-1)) + +#define _mm_mask_cmp_epi8_mask(m, a, b, p) \ + ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), (int)(p), \ + (__mmask16)(m))) + +#define _mm_cmp_epu8_mask(a, b, p) \ + ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), (int)(p), \ + (__mmask16)-1)) + +#define _mm_mask_cmp_epu8_mask(m, a, b, p) \ + ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), (int)(p), \ + (__mmask16)(m))) + +#define _mm256_cmp_epi8_mask(a, b, p) \ + ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), (int)(p), \ + (__mmask32)-1)) + +#define _mm256_mask_cmp_epi8_mask(m, a, b, p) \ + ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), (int)(p), \ + (__mmask32)(m))) + +#define _mm256_cmp_epu8_mask(a, b, p) \ + ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), (int)(p), \ + (__mmask32)-1)) + +#define _mm256_mask_cmp_epu8_mask(m, a, b, p) \ + ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), (int)(p), \ + (__mmask32)(m))) + +#define _mm_cmp_epi16_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \ + (__v8hi)(__m128i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm_mask_cmp_epi16_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \ + (__v8hi)(__m128i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm_cmp_epu16_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \ + (__v8hi)(__m128i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm_mask_cmp_epu16_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \ + (__v8hi)(__m128i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm256_cmp_epi16_mask(a, b, p) \ + ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)-1)) + +#define _mm256_mask_cmp_epi16_mask(m, a, b, p) \ + ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)(m))) + +#define 
_mm256_cmp_epu16_mask(a, b, p) \ + ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)-1)) + +#define _mm256_mask_cmp_epu16_mask(m, a, b, p) \ + ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), (int)(p), \ + (__mmask16)(m))) + +#define _mm_cmpeq_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epi8_mask(A, B) \ + _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epi8_mask(k, A, B) \ + _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epi8_mask(A, B) \ + _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epi8_mask(k, A, B) \ + _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epu8_mask(A, B) \ + _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epu8_mask(A, B) \ + 
_mm_cmp_epu8_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epu8_mask(k, A, B) \ + _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epu8_mask(A, B) \ + _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epu8_mask(k, A, B) \ + _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epi16_mask(A, B) \ + _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epi16_mask(k, A, B) \ + _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epi16_mask(A, B) \ + _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_NE) +#define 
_mm256_mask_cmpneq_epi16_mask(k, A, B) \ + _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epu16_mask(A, B) \ + _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epu16_mask(k, A, B) \ + _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epu16_mask(A, B) \ + _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epu16_mask(k, A, B) \ + _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_add_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_add_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_add_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_add_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_add_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_add_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sub_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + 
return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_sub_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sub_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_sub_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sub_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sub_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sub_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sub_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_add_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_add_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_add_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_add_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_add_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_add_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_add_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_add_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sub_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_sub_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_sub_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sub_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sub_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sub_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mullo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mullo_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mullo_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mullo_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mullo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + 
(__v8hi)_mm_mullo_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mullo_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mullo_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W) +{ + return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U, + (__v16qi) __W, + (__v16qi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W) +{ + return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U, + (__v32qi) __W, + (__v32qi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W) +{ + return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U, + (__v8hi) __W, + (__v8hi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W) +{ + return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U, + (__v16hi) __W, + (__v16hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_abs_epi8(__A), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_abs_epi8(__A), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_abs_epi8(__A), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_abs_epi8(__A), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_abs_epi16(__A), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_abs_epi16(__A), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_abs_epi16(__A), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_abs_epi16(__A), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packs_epi32(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packs_epi32(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i 
__DEFAULT_FN_ATTRS256 +_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packs_epi32(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packs_epi32(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packs_epi16(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packs_epi16(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packs_epi16(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packs_epi16(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packus_epi32(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_packus_epi32(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packus_epi32(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_packus_epi32(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packus_epi16(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_packus_epi16(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packus_epi16(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_packus_epi16(__A, __B), + (__v32qi)__W); +} + 
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_adds_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epu8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_adds_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_adds_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 
+_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_adds_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_avg_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_avg_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_avg_epu8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_avg_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_avg_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_avg_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_avg_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_avg_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i 
__B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_max_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_max_epu8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_max_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_max_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + 
(__v32qi)_mm256_min_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm_min_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M, + (__v32qi)_mm256_min_epu8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_min_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_min_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_shuffle_epi8(__A, __B), + 
(__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_shuffle_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_shuffle_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_shuffle_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epu8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_subs_epu8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epu8(__A, __B), + (__v32qi)__W); +} + +static 
__inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_subs_epu8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_subs_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, + __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_subs_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I, + (__v8hi) __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_permutex2var_epi16(__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_permutex2var_epi16(__A, __I, __B), + (__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_permutex2var_epi16(__A, __I, __B), + (__v8hi)__I); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_permutex2var_epi16(__A, __I, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I, + (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutex2var_epi16(__m256i __A, __mmask16 __U, __m256i __I, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B), + (__v16hi)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask2_permutex2var_epi16(__m256i __A, __m256i __I, __mmask16 __U, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B), + (__v16hi)__I); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A, __m256i __I, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_maddubs_epi16(__X, __Y), + (__v8hi)__W); +} + +static __inline__ 
__m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_maddubs_epi16(__X, __Y), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X, + __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_maddubs_epi16(__X, __Y), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_maddubs_epi16(__X, __Y), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_madd_epi16(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_madd_epi16(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_madd_epi16(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_madd_epi16(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi16_epi8 (__m128i __A) { + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi16_epi8 (__m256i __A) { + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi16_epi8 (__m128i __A) { + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i 
__A) { + return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi16_epi8 (__m256i __A) { + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + (__mmask16) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) { + return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi16_epi8 (__m128i __A) { + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v8hi)__A, __v8qi), + (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) { + return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A, + (__v16qi) _mm_setzero_si128(), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovwb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M); +} + + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovuswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi16_epi8 (__m256i __A) { + return (__m128i)__builtin_convertvector((__v16hi) __A, __v16qi); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm256_cvtepi16_epi8(__A), + (__v16qi)__O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi16_epi8 (__mmask16 __M, __m256i __A) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M, + (__v16qi)_mm256_cvtepi16_epi8(__A), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A) +{ + __builtin_ia32_pmovwb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A) +{ + __builtin_ia32_pmovswb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A) +{ + __builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhrs_epi16(__X, 
__Y), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhrs_epi16(__X, __Y), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhrs_epi16(__X, __Y), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhrs_epi16(__X, __Y), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epu16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epu16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epu16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epu16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_mulhi_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_mulhi_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpackhi_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpackhi_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpackhi_epi8(__A, __B), 
+ (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpackhi_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpackhi_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpackhi_epi16(__A, __B), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpackhi_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpackhi_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpacklo_epi8(__A, __B), + (__v16qi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, + (__v16qi)_mm_unpacklo_epi8(__A, __B), + (__v16qi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpacklo_epi8(__A, __B), + (__v32qi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, + (__v32qi)_mm256_unpacklo_epi8(__A, __B), + (__v32qi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpacklo_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_unpacklo_epi16(__A, __B), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpacklo_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_unpacklo_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) +{ + return 
(__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepi8_epi16(__A), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepi8_epi16(__A), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepi8_epi16(__A), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepi8_epi16(__A), + (__v16hi)_mm256_setzero_si256()); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepu8_epi16(__A), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_cvtepu8_epi16(__A), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepu8_epi16(__A), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_cvtepu8_epi16(__A), + (__v16hi)_mm256_setzero_si256()); +} + + +#define _mm_mask_shufflehi_epi16(W, U, A, imm) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflehi_epi16((A), (imm)), \ + (__v8hi)(__m128i)(W))) + +#define _mm_maskz_shufflehi_epi16(U, A, imm) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflehi_epi16((A), (imm)), \ + (__v8hi)_mm_setzero_si128())) + +#define _mm256_mask_shufflehi_epi16(W, U, A, imm) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \ + (__v16hi)(__m256i)(W))) + +#define _mm256_maskz_shufflehi_epi16(U, A, imm) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \ + (__v16hi)_mm256_setzero_si256())) + +#define _mm_mask_shufflelo_epi16(W, U, A, imm) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflelo_epi16((A), (imm)), \ + (__v8hi)(__m128i)(W))) + +#define _mm_maskz_shufflelo_epi16(U, A, imm) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shufflelo_epi16((A), (imm)), \ + (__v8hi)_mm_setzero_si128())) + +#define _mm256_mask_shufflelo_epi16(W, U, A, imm) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflelo_epi16((A), \ + (imm)), \ + (__v16hi)(__m256i)(W))) + +#define _mm256_maskz_shufflelo_epi16(U, A, imm) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shufflelo_epi16((A), \ + (imm)), \ + (__v16hi)_mm256_setzero_si256())) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sllv_epi16(__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psllv16hi((__v16hi)__A, (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sllv_epi16(__m256i __W, __mmask16 
__U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sllv_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sllv_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_sllv_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psllv8hi((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sllv_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sllv_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sll_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sll_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sll_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sll_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_slli_epi16(__A, (int)__B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_slli_epi16(__A, (int)__B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, + unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_slli_epi16(__A, (int)__B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_slli_epi16(__A, (int)__B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srlv_epi16(__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psrlv16hi((__v16hi)__A, (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srlv_epi16(__A, __B), + (__v16hi)__W); +} + +static 
__inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srlv_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srlv_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrlv8hi((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srlv_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srlv_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srav_epi16(__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_psrav16hi((__v16hi)__A, (__v16hi)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srav_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srav_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srav_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psrav8hi((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srav_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srav_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sra_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_sra_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sra_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_sra_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srai_epi16(__A, (int)__B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 
+_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srai_epi16(__A, (int)__B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, + unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srai_epi16(__A, (int)__B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srai_epi16(__A, (int)__B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srl_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srl_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srl_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srl_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srli_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srli_epi16 (__mmask8 __U, __m128i __A, int __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, + (__v8hi)_mm_srli_epi16(__A, __B), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srli_epi16(__A, __B), + (__v16hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, + (__v16hi)_mm256_srli_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U, + (__v8hi) __A, + (__v8hi) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U, + (__v8hi) __A, + (__v8hi) _mm_setzero_si128 ()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U, + (__v16hi) __A, + (__v16hi) __W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U, + 
(__v16hi) __A, + (__v16hi) _mm256_setzero_si256 ()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U, + (__v16qi) __A, + (__v16qi) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U, + (__v16qi) __A, + (__v16qi) _mm_setzero_si128 ()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U, + (__v32qi) __A, + (__v32qi) __W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U, + (__v32qi) __A, + (__v32qi) _mm256_setzero_si256 ()); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A) +{ + return (__m128i) __builtin_ia32_selectb_128(__M, + (__v16qi) _mm_set1_epi8(__A), + (__v16qi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_set1_epi8 (__mmask16 __M, char __A) +{ + return (__m128i) __builtin_ia32_selectb_128(__M, + (__v16qi) _mm_set1_epi8(__A), + (__v16qi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A) +{ + return (__m256i) __builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_set1_epi8(__A), + (__v32qi) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_set1_epi8 (__mmask32 __M, char __A) +{ + return (__m256i) __builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_set1_epi8(__A), + (__v32qi) _mm256_setzero_si256()); +} + +static __inline __m128i __DEFAULT_FN_ATTRS128 +_mm_loadu_epi16 (void const *__P) +{ + struct __loadu_epi16 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi16*)__P)->__v; +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((const __v8hi *) __P, + (__v8hi) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquhi128_mask ((const __v8hi *) __P, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline __m256i __DEFAULT_FN_ATTRS256 +_mm256_loadu_epi16 (void const *__P) +{ + struct __loadu_epi16 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi16*)__P)->__v; +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((const __v16hi *) __P, + (__v16hi) __W, + (__mmask16) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquhi256_mask ((const __v16hi *) __P, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +static __inline __m128i __DEFAULT_FN_ATTRS128 +_mm_loadu_epi8 (void const *__P) +{ + struct __loadu_epi8 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi8*)__P)->__v; +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 
+_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((const __v16qi *) __P, + (__v16qi) __W, + (__mmask16) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddquqi128_mask ((const __v16qi *) __P, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +static __inline __m256i __DEFAULT_FN_ATTRS256 +_mm256_loadu_epi8 (void const *__P) +{ + struct __loadu_epi8 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi8*)__P)->__v; +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((const __v32qi *) __P, + (__v32qi) __W, + (__mmask32) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddquqi256_mask ((const __v32qi *) __P, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +static __inline void __DEFAULT_FN_ATTRS128 +_mm_storeu_epi16 (void *__P, __m128i __A) +{ + struct __storeu_epi16 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi16*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedquhi128_mask ((__v8hi *) __P, + (__v8hi) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS256 +_mm256_storeu_epi16 (void *__P, __m256i __A) +{ + struct __storeu_epi16 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi16*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A) +{ + __builtin_ia32_storedquhi256_mask ((__v16hi *) __P, + (__v16hi) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS128 +_mm_storeu_epi8 (void *__P, __m128i __A) +{ + struct __storeu_epi8 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi8*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A) +{ + __builtin_ia32_storedquqi128_mask ((__v16qi *) __P, + (__v16qi) __A, + (__mmask16) __U); +} + +static __inline void __DEFAULT_FN_ATTRS256 +_mm256_storeu_epi8 (void *__P, __m256i __A) +{ + struct __storeu_epi8 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi8*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A) +{ + __builtin_ia32_storedquqi256_mask ((__v32qi *) __P, + (__v32qi) __A, + (__mmask32) __U); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_test_epi8_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_test_epi8_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 
+_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpneq_epi8_mask (__U, _mm256_and_si256(__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_test_epi16_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpneq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpneq_epi16_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_test_epi16_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpneq_epi16_mask (_mm256_and_si256 (__A, __B), + _mm256_setzero_si256 ()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpneq_epi16_mask (__U, _mm256_and_si256(__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_testn_epi8_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpeq_epi8_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_testn_epi8_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpeq_epi8_mask (_mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpeq_epi8_mask (__U, _mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_testn_epi16_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpeq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpeq_epi16_mask (__U, _mm_and_si128(__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_testn_epi16_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpeq_epi16_mask (_mm256_and_si256(__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpeq_epi16_mask (__U, _mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS128 +_mm_movepi8_mask (__m128i __A) +{ + return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A); +} + +static __inline__ __mmask32 __DEFAULT_FN_ATTRS256 +_mm256_movepi8_mask (__m256i __A) +{ + return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_movepi16_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A); +} + +static __inline__ __mmask16 __DEFAULT_FN_ATTRS256 +_mm256_movepi16_mask (__m256i __A) +{ + return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_movm_epi8 (__mmask16 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2b128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_movm_epi8 (__mmask32 __A) +{ + return (__m256i) 
__builtin_ia32_cvtmask2b256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_movm_epi16 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2w128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_movm_epi16 (__mmask16 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2w256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128(__M, + (__v16qi) _mm_broadcastb_epi8(__A), + (__v16qi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectb_128(__M, + (__v16qi) _mm_broadcastb_epi8(__A), + (__v16qi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_broadcastb_epi8(__A), + (__v32qi) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectb_256(__M, + (__v32qi) _mm256_broadcastb_epi8(__A), + (__v32qi) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128(__M, + (__v8hi) _mm_broadcastw_epi16(__A), + (__v8hi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectw_128(__M, + (__v8hi) _mm_broadcastw_epi16(__A), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256(__M, + (__v16hi) _mm256_broadcastw_epi16(__A), + (__v16hi) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectw_256(__M, + (__v16hi) _mm256_broadcastw_epi16(__A), + (__v16hi) _mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A) +{ + return (__m256i) __builtin_ia32_selectw_256 (__M, + (__v16hi) _mm256_set1_epi16(__A), + (__v16hi) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_set1_epi16 (__mmask16 __M, short __A) +{ + return (__m256i) __builtin_ia32_selectw_256(__M, + (__v16hi)_mm256_set1_epi16(__A), + (__v16hi) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A) +{ + return (__m128i) __builtin_ia32_selectw_128(__M, + (__v8hi) _mm_set1_epi16(__A), + (__v8hi) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_set1_epi16 (__mmask8 __M, short __A) +{ + return (__m128i) __builtin_ia32_selectw_128(__M, + (__v8hi) _mm_set1_epi16(__A), + (__v8hi) _mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_permutexvar_epi16 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_permvarhi128((__v8hi) __B, (__v8hi) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_permutexvar_epi16 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_permutexvar_epi16(__A, __B), + (__v8hi) 
_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M, + (__v8hi)_mm_permutexvar_epi16(__A, __B), + (__v8hi)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_permutexvar_epi16 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_permvarhi256((__v16hi) __B, (__v16hi) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_permutexvar_epi16(__A, __B), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A, + __m256i __B) +{ + return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M, + (__v16hi)_mm256_permutexvar_epi16(__A, __B), + (__v16hi)__W); +} + +#define _mm_mask_alignr_epi8(W, U, A, B, N) \ + ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \ + (__v16qi)(__m128i)(W))) + +#define _mm_maskz_alignr_epi8(U, A, B, N) \ + ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \ + (__v16qi)_mm_setzero_si128())) + +#define _mm256_mask_alignr_epi8(W, U, A, B, N) \ + ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \ + (__v32qi)(__m256i)(W))) + +#define _mm256_maskz_alignr_epi8(U, A, B, N) \ + ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \ + (__v32qi)_mm256_setzero_si256())) + +#define _mm_dbsad_epu8(A, B, imm) \ + ((__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(imm))) + +#define _mm_mask_dbsad_epu8(W, U, A, B, imm) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \ + (__v8hi)(__m128i)(W))) + +#define _mm_maskz_dbsad_epu8(U, A, B, imm) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \ + (__v8hi)_mm_setzero_si128())) + +#define _mm256_dbsad_epu8(A, B, imm) \ + ((__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), (int)(imm))) + +#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \ + (__v16hi)(__m256i)(W))) + +#define _mm256_maskz_dbsad_epu8(U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \ + (__v16hi)_mm256_setzero_si256())) + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_add_epi16(__m128i __W) { + return __builtin_reduce_add((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_mul_epi16(__m128i __W) { + return __builtin_reduce_mul((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_and_epi16(__m128i __W) { + return __builtin_reduce_and((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_or_epi16(__m128i __W) { + return __builtin_reduce_or((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_add_epi16( __mmask8 __M, __m128i __W) { + __W = _mm_maskz_mov_epi16(__M, __W); + return __builtin_reduce_add((__v8hi)__W); +} + +static __inline__ short 
__DEFAULT_FN_ATTRS128 +_mm_mask_reduce_mul_epi16( __mmask8 __M, __m128i __W) { + __W = _mm_mask_mov_epi16(_mm_set1_epi16(1), __M, __W); + return __builtin_reduce_mul((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_and_epi16( __mmask8 __M, __m128i __W) { + __W = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __W); + return __builtin_reduce_and((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W) { + __W = _mm_maskz_mov_epi16(__M, __W); + return __builtin_reduce_or((__v8hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_max_epi16(__m128i __V) { + return __builtin_reduce_max((__v8hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS128 +_mm_reduce_max_epu16(__m128i __V) { + return __builtin_reduce_max((__v8hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_reduce_min_epi16(__m128i __V) { + return __builtin_reduce_min((__v8hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS128 +_mm_reduce_min_epu16(__m128i __V) { + return __builtin_reduce_min((__v8hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_max_epi16(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi16(_mm_set1_epi16(-32767-1), __M, __V); + return __builtin_reduce_max((__v8hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_max_epu16(__mmask16 __M, __m128i __V) { + __V = _mm_maskz_mov_epi16(__M, __V); + return __builtin_reduce_max((__v8hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_min_epi16(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi16(_mm_set1_epi16(32767), __M, __V); + return __builtin_reduce_min((__v8hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_min_epu16(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __V); + return __builtin_reduce_min((__v8hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_add_epi16(__m256i __W) { + return __builtin_reduce_add((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_mul_epi16(__m256i __W) { + return __builtin_reduce_mul((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_and_epi16(__m256i __W) { + return __builtin_reduce_and((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_or_epi16(__m256i __W) { + return __builtin_reduce_or((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_add_epi16( __mmask16 __M, __m256i __W) { + __W = _mm256_maskz_mov_epi16(__M, __W); + return __builtin_reduce_add((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_mul_epi16( __mmask16 __M, __m256i __W) { + __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(1), __M, __W); + return __builtin_reduce_mul((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_and_epi16( __mmask16 __M, __m256i __W) { + __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __W); + return __builtin_reduce_and((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W) { + __W = _mm256_maskz_mov_epi16(__M, __W); + return __builtin_reduce_or((__v16hi)__W); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_max_epi16(__m256i __V) { + return __builtin_reduce_max((__v16hi)__V); +} + +static __inline__ unsigned short 
__DEFAULT_FN_ATTRS256 +_mm256_reduce_max_epu16(__m256i __V) { + return __builtin_reduce_max((__v16hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_reduce_min_epi16(__m256i __V) { + return __builtin_reduce_min((__v16hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS256 +_mm256_reduce_min_epu16(__m256i __V) { + return __builtin_reduce_min((__v16hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __V) { + __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-32767-1), __M, __V); + return __builtin_reduce_max((__v16hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __V) { + __V = _mm256_maskz_mov_epi16(__M, __V); + return __builtin_reduce_max((__v16hu)__V); +} + +static __inline__ short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __V) { + __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(32767), __M, __V); + return __builtin_reduce_min((__v16hi)__V); +} + +static __inline__ unsigned short __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __V) { + __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __V); + return __builtin_reduce_min((__v16hu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_add_epi8(__m128i __W) { + return __builtin_reduce_add((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_mul_epi8(__m128i __W) { + return __builtin_reduce_mul((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_and_epi8(__m128i __W) { + return __builtin_reduce_and((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_or_epi8(__m128i __W) { + return __builtin_reduce_or((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_add_epi8(__mmask16 __M, __m128i __W) { + __W = _mm_maskz_mov_epi8(__M, __W); + return __builtin_reduce_add((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_mul_epi8(__mmask16 __M, __m128i __W) { + __W = _mm_mask_mov_epi8(_mm_set1_epi8(1), __M, __W); + return __builtin_reduce_mul((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_and_epi8(__mmask16 __M, __m128i __W) { + __W = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __W); + return __builtin_reduce_and((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W) { + __W = _mm_maskz_mov_epi8(__M, __W); + return __builtin_reduce_or((__v16qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_max_epi8(__m128i __V) { + return __builtin_reduce_max((__v16qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS128 +_mm_reduce_max_epu8(__m128i __V) { + return __builtin_reduce_max((__v16qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_reduce_min_epi8(__m128i __V) { + return __builtin_reduce_min((__v16qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS128 +_mm_reduce_min_epu8(__m128i __V) { + return __builtin_reduce_min((__v16qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi8(_mm_set1_epi8(-127-1), __M, __V); + return __builtin_reduce_max((__v16qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __V) { + __V 
= _mm_maskz_mov_epi8(__M, __V); + return __builtin_reduce_max((__v16qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi8(_mm_set1_epi8(127), __M, __V); + return __builtin_reduce_min((__v16qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS128 +_mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __V) { + __V = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __V); + return __builtin_reduce_min((__v16qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_add_epi8(__m256i __W) { + return __builtin_reduce_add((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_mul_epi8(__m256i __W) { + return __builtin_reduce_mul((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_and_epi8(__m256i __W) { + return __builtin_reduce_and((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_or_epi8(__m256i __W) { + return __builtin_reduce_or((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_add_epi8(__mmask32 __M, __m256i __W) { + __W = _mm256_maskz_mov_epi8(__M, __W); + return __builtin_reduce_add((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_mul_epi8(__mmask32 __M, __m256i __W) { + __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(1), __M, __W); + return __builtin_reduce_mul((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_and_epi8(__mmask32 __M, __m256i __W) { + __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __W); + return __builtin_reduce_and((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W) { + __W = _mm256_maskz_mov_epi8(__M, __W); + return __builtin_reduce_or((__v32qs)__W); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_max_epi8(__m256i __V) { + return __builtin_reduce_max((__v32qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS256 +_mm256_reduce_max_epu8(__m256i __V) { + return __builtin_reduce_max((__v32qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_reduce_min_epi8(__m256i __V) { + return __builtin_reduce_min((__v32qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS256 +_mm256_reduce_min_epu8(__m256i __V) { + return __builtin_reduce_min((__v32qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __V) { + __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-127-1), __M, __V); + return __builtin_reduce_max((__v32qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __V) { + __V = _mm256_maskz_mov_epi8(__M, __V); + return __builtin_reduce_max((__v32qu)__V); +} + +static __inline__ signed char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __V) { + __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(127), __M, __V); + return __builtin_reduce_min((__v32qs)__V); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS256 +_mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __V) { + __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __V); + return __builtin_reduce_min((__v32qu)__V); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __AVX512VLBWINTRIN_H */ diff --git a/third_party/intel/clang/avx512vlcdintrin.h 
b/third_party/intel/clang/avx512vlcdintrin.h new file mode 100644 index 000000000..923e2c551 --- /dev/null +++ b/third_party/intel/clang/avx512vlcdintrin.h @@ -0,0 +1,230 @@ +/*===---- avx512vlcdintrin.h - AVX512VL and AVX512CD intrinsics ------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VLCDINTRIN_H +#define __AVX512VLCDINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512cd,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512cd,no-evex512"), \ + __min_vector_width__(256))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastmb_epi64 (__mmask8 __A) +{ + return (__m128i) _mm_set1_epi64x((long long) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcastmb_epi64 (__mmask8 __A) +{ + return (__m256i) _mm256_set1_epi64x((long long)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m128i) _mm_set1_epi32((int)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcastmw_epi32 (__mmask16 __A) +{ + return (__m256i) _mm256_set1_epi32((int)__A); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_conflict_epi64 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictdi_128 ((__v2di) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_conflict_epi64(__A), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_conflict_epi64(__A), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_conflict_epi64 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictdi_256 ((__v4di) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_conflict_epi64(__A), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_conflict_epi64(__A), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_conflict_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vpconflictsi_128 ((__v4si) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_conflict_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_conflict_epi32(__A), + 
(__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_conflict_epi32 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vpconflictsi_256 ((__v8si) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_conflict_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_conflict_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_lzcnt_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntd_128 ((__v4si) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_lzcnt_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_lzcnt_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_lzcnt_epi32 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntd_256 ((__v8si) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_lzcnt_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_lzcnt_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_lzcnt_epi64 (__m128i __A) +{ + return (__m128i) __builtin_ia32_vplzcntq_128 ((__v2di) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_lzcnt_epi64(__A), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_lzcnt_epi64(__A), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_lzcnt_epi64 (__m256i __A) +{ + return (__m256i) __builtin_ia32_vplzcntq_256 ((__v4di) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_lzcnt_epi64(__A), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_lzcnt_epi64(__A), + (__v4di)_mm256_setzero_si256()); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __AVX512VLCDINTRIN_H */ diff --git a/third_party/intel/clang/avx512vldqintrin.h b/third_party/intel/clang/avx512vldqintrin.h new file mode 100644 index 000000000..272cdd89e --- /dev/null +++ b/third_party/intel/clang/avx512vldqintrin.h @@ -0,0 +1,1173 @@ +/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ 
intrinsics ------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VLDQINTRIN_H +#define __AVX512VLDQINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512dq,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512dq,no-evex512"), \ + __min_vector_width__(256))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mullo_epi64 (__m256i __A, __m256i __B) { + return (__m256i) ((__v4du) __A * (__v4du) __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mullo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_mullo_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mullo_epi64(__mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_mullo_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mullo_epi64 (__m128i __A, __m128i __B) { + return (__m128i) ((__v2du) __A * (__v2du) __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mullo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_mullo_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mullo_epi64(__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_mullo_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_andnot_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_andnot_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_andnot_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_andnot_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_andnot_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_andnot_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_andnot_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_andnot_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_andnot_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_andnot_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_andnot_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return 
(__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_andnot_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_andnot_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_andnot_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_andnot_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_andnot_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_and_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_and_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_and_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_and_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_and_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_and_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_and_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_and_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_and_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_and_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_and_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_and_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_and_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_and_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_and_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_and_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_xor_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_xor_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_xor_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_xor_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_xor_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_xor_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_xor_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_xor_ps(__m256 __W, __mmask8 __U, __m256 
__A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_xor_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_xor_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_xor_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_xor_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_xor_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_xor_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_xor_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_or_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_or_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_or_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_or_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_or_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_or_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_or_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_or_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_or_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_or_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_or_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_or_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_or_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_or_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_or_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_or_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtpd_epi64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtpd_epi64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + 
(__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtpd_epu64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtpd_epu64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtps_epi64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtps_epi64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtps_epu64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtps_epu64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_pd (__m128i __A) { + return (__m128d)__builtin_convertvector((__v2di)__A, __v2df); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtepi64_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtepi64_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_pd (__m256i __A) { + return (__m256d)__builtin_convertvector((__v4di)__A, __v4df); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtepi64_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtepi64_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_ps (__m128i __A) { + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_ps (__m256i __A) { + return (__m128)__builtin_convertvector((__v4di)__A, __v4sf); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtepi64_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtepi64_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttpd_epi64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + 
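+/* Minimal usage sketch for the truncating conversion defined just above;
+   the mask value and inputs are arbitrary examples, not part of the
+   intrinsic definitions themselves:
+
+     __m128d in  = _mm_set_pd(3.7, -1.2);                     // lanes: {-1.2, 3.7}
+     __m128i out = _mm_mask_cvttpd_epi64(_mm_setzero_si128(), 0x1, in);
+     // lane 0 = (long long)-1.2 == -1 (truncated toward zero),
+     // lane 1 = 0 (taken from the passthrough operand, since mask bit 1 is clear)
+
+   The _mm_mask_* and _mm_maskz_* variants below differ only in whether
+   unselected lanes come from the passthrough operand or from zero. */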
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttpd_epi64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttpd_epu64 (__m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttpd_epu64 (__m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) { + return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttps_epi64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttps_epi64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i 
__DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttps_epu64 (__m128 __A) { + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A, + (__v2di) _mm_setzero_si128(), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttps_epu64 (__m128 __A) { + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) { + return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A, + (__v4di) _mm256_setzero_si256(), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtepu64_pd (__m128i __A) { + return (__m128d)__builtin_convertvector((__v2du)__A, __v2df); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtepu64_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtepu64_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_cvtepu64_pd (__m256i __A) { + return (__m256d)__builtin_convertvector((__v4du)__A, __v4df); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtepu64_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtepu64_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtepu64_ps (__m128i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) { + return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A, + (__v4sf) _mm_setzero_ps(), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_cvtepu64_ps (__m256i __A) { + return (__m128)__builtin_convertvector((__v4du)__A, __v4sf); +} + 
+static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtepu64_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtepu64_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +#define _mm_range_pd(A, B, C) \ + ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1)) + +#define _mm_mask_range_pd(W, U, A, B, C) \ + ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_range_pd(U, A, B, C) \ + ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), (int)(C), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm256_range_pd(A, B, C) \ + ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1)) + +#define _mm256_mask_range_pd(W, U, A, B, C) \ + ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_range_pd(U, A, B, C) \ + ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(C), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm_range_ps(A, B, C) \ + ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1)) + +#define _mm_mask_range_ps(W, U, A, B, C) \ + ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)(__m128)(W), (__mmask8)(U))) + +#define _mm_maskz_range_ps(U, A, B, C) \ + ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), (int)(C), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U))) + +#define _mm256_range_ps(A, B, C) \ + ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1)) + +#define _mm256_mask_range_ps(W, U, A, B, C) \ + ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)(__m256)(W), (__mmask8)(U))) + +#define _mm256_maskz_range_ps(U, A, B, C) \ + ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), (int)(C), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U))) + +#define _mm_reduce_pd(A, B) \ + ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1)) + +#define _mm_mask_reduce_pd(W, U, A, B) \ + ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_reduce_pd(U, A, B) \ + ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm256_reduce_pd(A, B) \ + ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1)) + +#define _mm256_mask_reduce_pd(W, U, A, B) \ + ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \ + 
(__v4df)(__m256d)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_reduce_pd(U, A, B) \ + ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm_reduce_ps(A, B) \ + ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1)) + +#define _mm_mask_reduce_ps(W, U, A, B) \ + ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_reduce_ps(U, A, B) \ + ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U))) + +#define _mm256_reduce_ps(A, B) \ + ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1)) + +#define _mm256_mask_reduce_ps(W, U, A, B) \ + ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_reduce_ps(U, A, B) \ + ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U))) + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_movepi32_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtd2mask128 ((__v4si) __A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_movepi32_mask (__m256i __A) +{ + return (__mmask8) __builtin_ia32_cvtd2mask256 ((__v8si) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_movm_epi32 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2d128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_movm_epi32 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2d256 (__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_movm_epi64 (__mmask8 __A) +{ + return (__m128i) __builtin_ia32_cvtmask2q128 (__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_movm_epi64 (__mmask8 __A) +{ + return (__m256i) __builtin_ia32_cvtmask2q256 (__A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_movepi64_mask (__m128i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask128 ((__v2di) __A); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_movepi64_mask (__m256i __A) +{ + return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_broadcast_f32x2 (__m128 __A) +{ + return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__M, + (__v8sf)_mm256_broadcast_f32x2(__A), + (__v8sf)__O); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__M, + (__v8sf)_mm256_broadcast_f32x2(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_broadcast_f64x2(__m128d __A) +{ + return (__m256d)__builtin_shufflevector((__v2df)__A, (__v2df)__A, + 0, 1, 0, 1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, __m128d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__M, + (__v4df)_mm256_broadcast_f64x2(__A), + (__v4df)__O); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 
+_mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__M, + (__v4df)_mm256_broadcast_f64x2(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_broadcast_i32x2 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector((__v4si)__A, (__v4si)__A, + 0, 1, 0, 1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_broadcast_i32x2(__A), + (__v4si)__O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_broadcast_i32x2(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcast_i32x2 (__m128i __A) +{ + return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A, + 0, 1, 0, 1, 0, 1, 0, 1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_broadcast_i32x2(__A), + (__v8si)__O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_broadcast_i32x2(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcast_i64x2(__m128i __A) +{ + return (__m256i)__builtin_shufflevector((__v2di)__A, (__v2di)__A, + 0, 1, 0, 1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_i64x2(__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_broadcast_i64x2(__A), + (__v4di)__O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_broadcast_i64x2(__A), + (__v4di)_mm256_setzero_si256()); +} + +#define _mm256_extractf64x2_pd(A, imm) \ + ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v2df)_mm_undefined_pd(), \ + (__mmask8)-1)) + +#define _mm256_mask_extractf64x2_pd(W, U, A, imm) \ + ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_extractf64x2_pd(U, A, imm) \ + ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm256_extracti64x2_epi64(A, imm) \ + ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \ + (int)(imm), \ + (__v2di)_mm_undefined_si128(), \ + (__mmask8)-1)) + +#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) \ + ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \ + (int)(imm), \ + (__v2di)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_extracti64x2_epi64(U, A, imm) \ + ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \ + (int)(imm), \ + (__v2di)_mm_setzero_si128(), \ + (__mmask8)(U))) + +#define _mm256_insertf64x2(A, B, imm) \ + ((__m256d)__builtin_ia32_insertf64x2_256((__v4df)(__m256d)(A), \ + (__v2df)(__m128d)(B), (int)(imm))) + +#define _mm256_mask_insertf64x2(W, U, A, B, imm) \ + 
((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_insertf64x2((A), (B), (imm)), \ + (__v4df)(__m256d)(W))) + +#define _mm256_maskz_insertf64x2(U, A, B, imm) \ + ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_insertf64x2((A), (B), (imm)), \ + (__v4df)_mm256_setzero_pd())) + +#define _mm256_inserti64x2(A, B, imm) \ + ((__m256i)__builtin_ia32_inserti64x2_256((__v4di)(__m256i)(A), \ + (__v2di)(__m128i)(B), (int)(imm))) + +#define _mm256_mask_inserti64x2(W, U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_inserti64x2((A), (B), (imm)), \ + (__v4di)(__m256i)(W))) + +#define _mm256_maskz_inserti64x2(U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_inserti64x2((A), (B), (imm)), \ + (__v4di)_mm256_setzero_si256())) + +#define _mm_mask_fpclass_pd_mask(U, A, imm) \ + ((__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)(U))) + +#define _mm_fpclass_pd_mask(A, imm) \ + ((__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \ + (__mmask8)-1)) + +#define _mm256_mask_fpclass_pd_mask(U, A, imm) \ + ((__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \ + (__mmask8)(U))) + +#define _mm256_fpclass_pd_mask(A, imm) \ + ((__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \ + (__mmask8)-1)) + +#define _mm_mask_fpclass_ps_mask(U, A, imm) \ + ((__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)(U))) + +#define _mm_fpclass_ps_mask(A, imm) \ + ((__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__mmask8)-1)) + +#define _mm256_mask_fpclass_ps_mask(U, A, imm) \ + ((__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__mmask8)(U))) + +#define _mm256_fpclass_ps_mask(A, imm) \ + ((__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__mmask8)-1)) + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/intel/clang/avx512vlfp16intrin.h b/third_party/intel/clang/avx512vlfp16intrin.h new file mode 100644 index 000000000..a12acb7d9 --- /dev/null +++ b/third_party/intel/clang/avx512vlfp16intrin.h @@ -0,0 +1,2071 @@ +/*===---------- avx512vlfp16intrin.h - AVX512-FP16 intrinsics --------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error \ + "Never use <avx512vlfp16intrin.h> directly; include <immintrin.h> instead." +#endif + +#ifdef __SSE2__ + +#ifndef __AVX512VLFP16INTRIN_H +#define __AVX512VLFP16INTRIN_H + +/* Define the default attributes for the functions in this file.
*/ +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512fp16,avx512vl,no-evex512"), \ + __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512fp16,avx512vl,no-evex512"), \ + __min_vector_width__(128))) + +static __inline__ _Float16 __DEFAULT_FN_ATTRS128 _mm_cvtsh_h(__m128h __a) { + return __a[0]; +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS256 _mm256_cvtsh_h(__m256h __a) { + return __a[0]; +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_set_sh(_Float16 __h) { + return __extension__(__m128h){__h, 0, 0, 0, 0, 0, 0, 0}; +} + +static __inline __m128h __DEFAULT_FN_ATTRS128 _mm_set1_ph(_Float16 __h) { + return (__m128h)(__v8hf){__h, __h, __h, __h, __h, __h, __h, __h}; +} + +static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_set1_ph(_Float16 __h) { + return (__m256h)(__v16hf){__h, __h, __h, __h, __h, __h, __h, __h, + __h, __h, __h, __h, __h, __h, __h, __h}; +} + +static __inline __m128h __DEFAULT_FN_ATTRS128 +_mm_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, + _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8) { + return (__m128h)(__v8hf){__h8, __h7, __h6, __h5, __h4, __h3, __h2, __h1}; +} + +static __inline __m256h __DEFAULT_FN_ATTRS256 +_mm256_set1_pch(_Float16 _Complex h) { + return (__m256h)_mm256_set1_ps(__builtin_bit_cast(float, h)); +} + +static __inline __m128h __DEFAULT_FN_ATTRS128 +_mm_set1_pch(_Float16 _Complex h) { + return (__m128h)_mm_set1_ps(__builtin_bit_cast(float, h)); +} + +static __inline __m256h __DEFAULT_FN_ATTRS256 +_mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, + _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8, + _Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12, + _Float16 __h13, _Float16 __h14, _Float16 __h15, _Float16 __h16) { + return (__m256h)(__v16hf){__h16, __h15, __h14, __h13, __h12, __h11, + __h10, __h9, __h8, __h7, __h6, __h5, + __h4, __h3, __h2, __h1}; +} + +#define _mm_setr_ph(h1, h2, h3, h4, h5, h6, h7, h8) \ + _mm_set_ph((h8), (h7), (h6), (h5), (h4), (h3), (h2), (h1)) + +#define _mm256_setr_ph(h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, \ + h14, h15, h16) \ + _mm256_set_ph((h16), (h15), (h14), (h13), (h12), (h11), (h10), (h9), (h8), \ + (h7), (h6), (h5), (h4), (h3), (h2), (h1)) + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_add_ph(__m256h __A, + __m256h __B) { + return (__m256h)((__v16hf)__A + (__v16hf)__B); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_add_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + __U, (__v16hf)_mm256_add_ph(__A, __B), (__v16hf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_ph(__mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + __U, (__v16hf)_mm256_add_ph(__A, __B), (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_add_ph(__m128h __A, + __m128h __B) { + return (__m128h)((__v8hf)__A + (__v8hf)__B); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_add_ph(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_add_ph(__A, __B), + (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_add_ph(__mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128(__U, 
(__v8hf)_mm_add_ph(__A, __B), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_sub_ph(__m256h __A, + __m256h __B) { + return (__m256h)((__v16hf)__A - (__v16hf)__B); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_sub_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + __U, (__v16hf)_mm256_sub_ph(__A, __B), (__v16hf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_sub_ph(__mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + __U, (__v16hf)_mm256_sub_ph(__A, __B), (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sub_ph(__m128h __A, + __m128h __B) { + return (__m128h)((__v8hf)__A - (__v8hf)__B); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sub_ph(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_sub_ph(__A, __B), + (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sub_ph(__mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_sub_ph(__A, __B), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_mul_ph(__m256h __A, + __m256h __B) { + return (__m256h)((__v16hf)__A * (__v16hf)__B); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_mul_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + __U, (__v16hf)_mm256_mul_ph(__A, __B), (__v16hf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_mul_ph(__mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + __U, (__v16hf)_mm256_mul_ph(__A, __B), (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mul_ph(__m128h __A, + __m128h __B) { + return (__m128h)((__v8hf)__A * (__v8hf)__B); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_mul_ph(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_mul_ph(__A, __B), + (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_mul_ph(__mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_mul_ph(__A, __B), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_div_ph(__m256h __A, + __m256h __B) { + return (__m256h)((__v16hf)__A / (__v16hf)__B); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_div_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + __U, (__v16hf)_mm256_div_ph(__A, __B), (__v16hf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_div_ph(__mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + __U, (__v16hf)_mm256_div_ph(__A, __B), (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_div_ph(__m128h __A, + __m128h __B) { + return (__m128h)((__v8hf)__A / (__v8hf)__B); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_div_ph(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_div_ph(__A, __B), + (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_div_ph(__mmask8 
__U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_div_ph(__A, __B), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_min_ph(__m256h __A, + __m256h __B) { + return (__m256h)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_min_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + (__v16hf)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B), + (__v16hf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_ph(__mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + (__v16hf)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_min_ph(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_min_ph(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B), + (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_min_ph(__mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_max_ph(__m256h __A, + __m256h __B) { + return (__m256h)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_max_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + (__v16hf)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B), + (__v16hf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_ph(__mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + (__v16hf)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_max_ph(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_max_ph(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B), + (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_max_ph(__mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_abs_ph(__m256h __A) { + return (__m256h)_mm256_and_epi32(_mm256_set1_epi32(0x7FFF7FFF), (__m256i)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_abs_ph(__m128h __A) { + return (__m128h)_mm_and_epi32(_mm_set1_epi32(0x7FFF7FFF), (__m128i)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_conj_pch(__m256h __A) { + return (__m256h)_mm256_xor_ps((__m256)__A, _mm256_set1_ps(-0.0f)); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 
+_mm256_mask_conj_pch(__m256h __W, __mmask8 __U, __m256h __A) { + return (__m256h)__builtin_ia32_selectps_256( + (__mmask8)__U, (__v8sf)_mm256_conj_pch(__A), (__v8sf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_conj_pch(__mmask8 __U, __m256h __A) { + return (__m256h)__builtin_ia32_selectps_256( + (__mmask8)__U, (__v8sf)_mm256_conj_pch(__A), (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_conj_pch(__m128h __A) { + return (__m128h)_mm_xor_ps((__m128)__A, _mm_set1_ps(-0.0f)); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_conj_pch(__m128h __W, + __mmask8 __U, + __m128h __A) { + return (__m128h)__builtin_ia32_selectps_128( + (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_conj_pch(__mmask8 __U, __m128h __A) { + return (__m128h)__builtin_ia32_selectps_128( + (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)_mm_setzero_ps()); +} + +#define _mm256_cmp_ph_mask(a, b, p) \ + ((__mmask16)__builtin_ia32_cmpph256_mask( \ + (__v16hf)(__m256h)(a), (__v16hf)(__m256h)(b), (int)(p), (__mmask16)-1)) + +#define _mm256_mask_cmp_ph_mask(m, a, b, p) \ + ((__mmask16)__builtin_ia32_cmpph256_mask( \ + (__v16hf)(__m256h)(a), (__v16hf)(__m256h)(b), (int)(p), (__mmask16)(m))) + +#define _mm_cmp_ph_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmpph128_mask( \ + (__v8hf)(__m128h)(a), (__v8hf)(__m128h)(b), (int)(p), (__mmask8)-1)) + +#define _mm_mask_cmp_ph_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmpph128_mask( \ + (__v8hf)(__m128h)(a), (__v8hf)(__m128h)(b), (int)(p), (__mmask8)(m))) + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_rcp_ph(__m256h __A) { + return (__m256h)__builtin_ia32_rcpph256_mask( + (__v16hf)__A, (__v16hf)_mm256_undefined_ph(), (__mmask16)-1); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_rcp_ph(__m256h __W, __mmask16 __U, __m256h __A) { + return (__m256h)__builtin_ia32_rcpph256_mask((__v16hf)__A, (__v16hf)__W, + (__mmask16)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_rcp_ph(__mmask16 __U, __m256h __A) { + return (__m256h)__builtin_ia32_rcpph256_mask( + (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rcp_ph(__m128h __A) { + return (__m128h)__builtin_ia32_rcpph128_mask( + (__v8hf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rcp_ph(__m128h __W, + __mmask8 __U, + __m128h __A) { + return (__m128h)__builtin_ia32_rcpph128_mask((__v8hf)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_rcp_ph(__mmask8 __U, + __m128h __A) { + return (__m128h)__builtin_ia32_rcpph128_mask( + (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_rsqrt_ph(__m256h __A) { + return (__m256h)__builtin_ia32_rsqrtph256_mask( + (__v16hf)__A, (__v16hf)_mm256_undefined_ph(), (__mmask16)-1); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_rsqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) { + return (__m256h)__builtin_ia32_rsqrtph256_mask((__v16hf)__A, (__v16hf)__W, + (__mmask16)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_rsqrt_ph(__mmask16 __U, __m256h __A) { + return (__m256h)__builtin_ia32_rsqrtph256_mask( + (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 
_mm_rsqrt_ph(__m128h __A) { + return (__m128h)__builtin_ia32_rsqrtph128_mask( + (__v8hf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt_ph(__m128h __W, + __mmask8 __U, + __m128h __A) { + return (__m128h)__builtin_ia32_rsqrtph128_mask((__v8hf)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_rsqrt_ph(__mmask8 __U, __m128h __A) { + return (__m128h)__builtin_ia32_rsqrtph128_mask( + (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_getexp_ph(__m128h __A) { + return (__m128h)__builtin_ia32_getexpph128_mask( + (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_ph(__m128h __W, __mmask8 __U, __m128h __A) { + return (__m128h)__builtin_ia32_getexpph128_mask((__v8hf)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_ph(__mmask8 __U, __m128h __A) { + return (__m128h)__builtin_ia32_getexpph128_mask( + (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_getexp_ph(__m256h __A) { + return (__m256h)__builtin_ia32_getexpph256_mask( + (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_getexp_ph(__m256h __W, __mmask16 __U, __m256h __A) { + return (__m256h)__builtin_ia32_getexpph256_mask((__v16hf)__A, (__v16hf)__W, + (__mmask16)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_getexp_ph(__mmask16 __U, __m256h __A) { + return (__m256h)__builtin_ia32_getexpph256_mask( + (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U); +} + +#define _mm_getmant_ph(A, B, C) \ + ((__m128h)__builtin_ia32_getmantph128_mask( \ + (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1)) + +#define _mm_mask_getmant_ph(W, U, A, B, C) \ + ((__m128h)__builtin_ia32_getmantph128_mask( \ + (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)(__m128h)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_getmant_ph(U, A, B, C) \ + ((__m128h)__builtin_ia32_getmantph128_mask( \ + (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U))) + +#define _mm256_getmant_ph(A, B, C) \ + ((__m256h)__builtin_ia32_getmantph256_mask( \ + (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)), \ + (__v16hf)_mm256_setzero_ph(), (__mmask16)-1)) + +#define _mm256_mask_getmant_ph(W, U, A, B, C) \ + ((__m256h)__builtin_ia32_getmantph256_mask( \ + (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)), (__v16hf)(__m256h)(W), \ + (__mmask16)(U))) + +#define _mm256_maskz_getmant_ph(U, A, B, C) \ + ((__m256h)__builtin_ia32_getmantph256_mask( \ + (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)), \ + (__v16hf)_mm256_setzero_ph(), (__mmask16)(U))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_scalef_ph(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_scalefph128_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_scalefph128_mask((__v8hf)__A, (__v8hf)__B, + (__v8hf)__W, (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_ph(__mmask8 __U, __m128h __A, __m128h __B) { + return 
(__m128h)__builtin_ia32_scalefph128_mask( + (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_scalef_ph(__m256h __A, + __m256h __B) { + return (__m256h)__builtin_ia32_scalefph256_mask( + (__v16hf)__A, (__v16hf)__B, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_scalef_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_scalefph256_mask((__v16hf)__A, (__v16hf)__B, + (__v16hf)__W, (__mmask16)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_scalef_ph(__mmask16 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_scalefph256_mask( + (__v16hf)__A, (__v16hf)__B, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U); +} + +#define _mm_roundscale_ph(A, imm) \ + ((__m128h)__builtin_ia32_rndscaleph_128_mask( \ + (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1)) + +#define _mm_mask_roundscale_ph(W, U, A, imm) \ + ((__m128h)__builtin_ia32_rndscaleph_128_mask( \ + (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)(__m128h)(W), (__mmask8)(U))) + +#define _mm_maskz_roundscale_ph(U, A, imm) \ + ((__m128h)__builtin_ia32_rndscaleph_128_mask( \ + (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U))) + +#define _mm256_roundscale_ph(A, imm) \ + ((__m256h)__builtin_ia32_rndscaleph_256_mask( \ + (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)_mm256_setzero_ph(), \ + (__mmask16)-1)) + +#define _mm256_mask_roundscale_ph(W, U, A, imm) \ + ((__m256h)__builtin_ia32_rndscaleph_256_mask( \ + (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)(__m256h)(W), \ + (__mmask16)(U))) + +#define _mm256_maskz_roundscale_ph(U, A, imm) \ + ((__m256h)__builtin_ia32_rndscaleph_256_mask( \ + (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)_mm256_setzero_ph(), \ + (__mmask16)(U))) + +#define _mm_reduce_ph(A, imm) \ + ((__m128h)__builtin_ia32_reduceph128_mask((__v8hf)(__m128h)(A), (int)(imm), \ + (__v8hf)_mm_setzero_ph(), \ + (__mmask8)-1)) + +#define _mm_mask_reduce_ph(W, U, A, imm) \ + ((__m128h)__builtin_ia32_reduceph128_mask( \ + (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)(__m128h)(W), (__mmask8)(U))) + +#define _mm_maskz_reduce_ph(U, A, imm) \ + ((__m128h)__builtin_ia32_reduceph128_mask((__v8hf)(__m128h)(A), (int)(imm), \ + (__v8hf)_mm_setzero_ph(), \ + (__mmask8)(U))) + +#define _mm256_reduce_ph(A, imm) \ + ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \ + (__v16hf)_mm256_setzero_ph(), \ + (__mmask16)-1)) + +#define _mm256_mask_reduce_ph(W, U, A, imm) \ + ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \ + (__v16hf)(__m256h)(W), \ + (__mmask16)(U))) + +#define _mm256_maskz_reduce_ph(U, A, imm) \ + ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \ + (__v16hf)_mm256_setzero_ph(), \ + (__mmask16)(U))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sqrt_ph(__m128h __a) { + return __builtin_ia32_sqrtph((__v8hf)__a); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_ph(__m128h __W, + __mmask8 __U, + __m128h __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm_sqrt_ph(__A), (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_ph(__mmask8 __U, + __m128h __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm_sqrt_ph(__A), (__v8hf)_mm_setzero_ph()); +} + +static __inline __m256h 
__DEFAULT_FN_ATTRS256 _mm256_sqrt_ph(__m256h __a) { + return (__m256h)__builtin_ia32_sqrtph256((__v16hf)__a); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_sqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, (__v16hf)_mm256_sqrt_ph(__A), (__v16hf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_sqrt_ph(__mmask16 __U, __m256h __A) { + return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U, + (__v16hf)_mm256_sqrt_ph(__A), + (__v16hf)_mm256_setzero_ph()); +} + +#define _mm_mask_fpclass_ph_mask(U, A, imm) \ + ((__mmask8)__builtin_ia32_fpclassph128_mask((__v8hf)(__m128h)(A), \ + (int)(imm), (__mmask8)(U))) + +#define _mm_fpclass_ph_mask(A, imm) \ + ((__mmask8)__builtin_ia32_fpclassph128_mask((__v8hf)(__m128h)(A), \ + (int)(imm), (__mmask8)-1)) + +#define _mm256_mask_fpclass_ph_mask(U, A, imm) \ + ((__mmask16)__builtin_ia32_fpclassph256_mask((__v16hf)(__m256h)(A), \ + (int)(imm), (__mmask16)(U))) + +#define _mm256_fpclass_ph_mask(A, imm) \ + ((__mmask16)__builtin_ia32_fpclassph256_mask((__v16hf)(__m256h)(A), \ + (int)(imm), (__mmask16)-1)) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpd_ph(__m128d __A) { + return (__m128h)__builtin_ia32_vcvtpd2ph128_mask( + (__v2df)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_ph(__m128h __W, + __mmask8 __U, + __m128d __A) { + return (__m128h)__builtin_ia32_vcvtpd2ph128_mask((__v2df)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_ph(__mmask8 __U, __m128d __A) { + return (__m128h)__builtin_ia32_vcvtpd2ph128_mask( + (__v2df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 _mm256_cvtpd_ph(__m256d __A) { + return (__m128h)__builtin_ia32_vcvtpd2ph256_mask( + (__v4df)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_ph(__m128h __W, __mmask8 __U, __m256d __A) { + return (__m128h)__builtin_ia32_vcvtpd2ph256_mask((__v4df)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_ph(__mmask8 __U, __m256d __A) { + return (__m128h)__builtin_ia32_vcvtpd2ph256_mask( + (__v4df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtph_pd(__m128h __A) { + return (__m128d)__builtin_ia32_vcvtph2pd128_mask( + (__v8hf)__A, (__v2df)_mm_undefined_pd(), (__mmask8)-1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtph_pd(__m128d __W, + __mmask8 __U, + __m128h __A) { + return (__m128d)__builtin_ia32_vcvtph2pd128_mask((__v8hf)__A, (__v2df)__W, + (__mmask8)__U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtph_pd(__mmask8 __U, __m128h __A) { + return (__m128d)__builtin_ia32_vcvtph2pd128_mask( + (__v8hf)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_cvtph_pd(__m128h __A) { + return (__m256d)__builtin_ia32_vcvtph2pd256_mask( + (__v8hf)__A, (__v4df)_mm256_undefined_pd(), (__mmask8)-1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtph_pd(__m256d __W, __mmask8 __U, __m128h __A) { + return (__m256d)__builtin_ia32_vcvtph2pd256_mask((__v8hf)__A, (__v4df)__W, + (__mmask8)__U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtph_pd(__mmask8 __U, __m128h __A) { + return 
(__m256d)__builtin_ia32_vcvtph2pd256_mask( + (__v8hf)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi16(__m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2w128_mask( + (__v8hf)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtph_epi16(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2w128_mask((__v8hf)__A, (__v8hi)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtph_epi16(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2w128_mask( + (__v8hf)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtph_epi16(__m256h __A) { + return (__m256i)__builtin_ia32_vcvtph2w256_mask( + (__v16hf)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtph_epi16(__m256i __W, __mmask16 __U, __m256h __A) { + return (__m256i)__builtin_ia32_vcvtph2w256_mask((__v16hf)__A, (__v16hi)__W, + (__mmask16)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtph_epi16(__mmask16 __U, __m256h __A) { + return (__m256i)__builtin_ia32_vcvtph2w256_mask( + (__v16hf)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi16(__m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2w128_mask( + (__v8hf)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttph_epi16(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2w128_mask((__v8hf)__A, (__v8hi)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttph_epi16(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2w128_mask( + (__v8hf)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttph_epi16(__m256h __A) { + return (__m256i)__builtin_ia32_vcvttph2w256_mask( + (__v16hf)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttph_epi16(__m256i __W, __mmask16 __U, __m256h __A) { + return (__m256i)__builtin_ia32_vcvttph2w256_mask((__v16hf)__A, (__v16hi)__W, + (__mmask16)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttph_epi16(__mmask16 __U, __m256h __A) { + return (__m256i)__builtin_ia32_vcvttph2w256_mask( + (__v16hf)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi16_ph(__m128i __A) { + return (__m128h) __builtin_convertvector((__v8hi)__A, __v8hf); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi16_ph(__m128h __W, __mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm_cvtepi16_ph(__A), (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi16_ph(__mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm_cvtepi16_ph(__A), (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_cvtepi16_ph(__m256i __A) { + return (__m256h) __builtin_convertvector((__v16hi)__A, __v16hf); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi16_ph(__m256h __W, __mmask16 __U, __m256i __A) { 
+ return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, (__v16hf)_mm256_cvtepi16_ph(__A), (__v16hf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi16_ph(__mmask16 __U, __m256i __A) { + return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U, + (__v16hf)_mm256_cvtepi16_ph(__A), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu16(__m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2uw128_mask( + (__v8hf)__A, (__v8hu)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtph_epu16(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2uw128_mask((__v8hf)__A, (__v8hu)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtph_epu16(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2uw128_mask( + (__v8hf)__A, (__v8hu)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtph_epu16(__m256h __A) { + return (__m256i)__builtin_ia32_vcvtph2uw256_mask( + (__v16hf)__A, (__v16hu)_mm256_undefined_si256(), (__mmask16)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtph_epu16(__m256i __W, __mmask16 __U, __m256h __A) { + return (__m256i)__builtin_ia32_vcvtph2uw256_mask((__v16hf)__A, (__v16hu)__W, + (__mmask16)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtph_epu16(__mmask16 __U, __m256h __A) { + return (__m256i)__builtin_ia32_vcvtph2uw256_mask( + (__v16hf)__A, (__v16hu)_mm256_setzero_si256(), (__mmask16)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu16(__m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2uw128_mask( + (__v8hf)__A, (__v8hu)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttph_epu16(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2uw128_mask((__v8hf)__A, (__v8hu)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttph_epu16(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2uw128_mask( + (__v8hf)__A, (__v8hu)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttph_epu16(__m256h __A) { + return (__m256i)__builtin_ia32_vcvttph2uw256_mask( + (__v16hf)__A, (__v16hu)_mm256_undefined_si256(), (__mmask16)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttph_epu16(__m256i __W, __mmask16 __U, __m256h __A) { + return (__m256i)__builtin_ia32_vcvttph2uw256_mask((__v16hf)__A, (__v16hu)__W, + (__mmask16)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttph_epu16(__mmask16 __U, __m256h __A) { + return (__m256i)__builtin_ia32_vcvttph2uw256_mask( + (__v16hf)__A, (__v16hu)_mm256_setzero_si256(), (__mmask16)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu16_ph(__m128i __A) { + return (__m128h) __builtin_convertvector((__v8hu)__A, __v8hf); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu16_ph(__m128h __W, __mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm_cvtepu16_ph(__A), (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu16_ph(__mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm_cvtepu16_ph(__A), (__v8hf)_mm_setzero_ph()); +} + +static 
__inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_cvtepu16_ph(__m256i __A) { + return (__m256h) __builtin_convertvector((__v16hu)__A, __v16hf); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu16_ph(__m256h __W, __mmask16 __U, __m256i __A) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, (__v16hf)_mm256_cvtepu16_ph(__A), (__v16hf)__W); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu16_ph(__mmask16 __U, __m256i __A) { + return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U, + (__v16hf)_mm256_cvtepu16_ph(__A), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi32(__m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2dq128_mask( + (__v8hf)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtph_epi32(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2dq128_mask((__v8hf)__A, (__v4si)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtph_epi32(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2dq128_mask( + (__v8hf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtph_epi32(__m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2dq256_mask( + (__v8hf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtph_epi32(__m256i __W, __mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2dq256_mask((__v8hf)__A, (__v8si)__W, + (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtph_epi32(__mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2dq256_mask( + (__v8hf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu32(__m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2udq128_mask( + (__v8hf)__A, (__v4su)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtph_epu32(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2udq128_mask((__v8hf)__A, (__v4su)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtph_epu32(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2udq128_mask( + (__v8hf)__A, (__v4su)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtph_epu32(__m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2udq256_mask( + (__v8hf)__A, (__v8su)_mm256_undefined_si256(), (__mmask8)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtph_epu32(__m256i __W, __mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2udq256_mask((__v8hf)__A, (__v8su)__W, + (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtph_epu32(__mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2udq256_mask( + (__v8hf)__A, (__v8su)_mm256_setzero_si256(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi32_ph(__m128i __A) { + return (__m128h)__builtin_ia32_vcvtdq2ph128_mask( + (__v4si)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_ph(__m128h __W, __mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_vcvtdq2ph128_mask((__v4si)__A, 
(__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi32_ph(__mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_vcvtdq2ph128_mask( + (__v4si)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_cvtepi32_ph(__m256i __A) { + return (__m128h) __builtin_convertvector((__v8si)__A, __v8hf); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_ph(__m128h __W, __mmask8 __U, __m256i __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm256_cvtepi32_ph(__A), (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi32_ph(__mmask8 __U, __m256i __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm256_cvtepi32_ph(__A), (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu32_ph(__m128i __A) { + return (__m128h)__builtin_ia32_vcvtudq2ph128_mask( + (__v4su)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu32_ph(__m128h __W, __mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_vcvtudq2ph128_mask((__v4su)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu32_ph(__mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_vcvtudq2ph128_mask( + (__v4su)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_cvtepu32_ph(__m256i __A) { + return (__m128h) __builtin_convertvector((__v8su)__A, __v8hf); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu32_ph(__m128h __W, __mmask8 __U, __m256i __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm256_cvtepu32_ph(__A), (__v8hf)__W); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu32_ph(__mmask8 __U, __m256i __A) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, (__v8hf)_mm256_cvtepu32_ph(__A), (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi32(__m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2dq128_mask( + (__v8hf)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttph_epi32(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2dq128_mask((__v8hf)__A, (__v4si)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttph_epi32(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2dq128_mask( + (__v8hf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttph_epi32(__m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2dq256_mask( + (__v8hf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttph_epi32(__m256i __W, __mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2dq256_mask((__v8hf)__A, (__v8si)__W, + (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttph_epi32(__mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2dq256_mask( + (__v8hf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu32(__m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2udq128_mask( + (__v8hf)__A, 
(__v4su)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttph_epu32(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2udq128_mask((__v8hf)__A, (__v4su)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttph_epu32(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2udq128_mask( + (__v8hf)__A, (__v4su)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttph_epu32(__m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2udq256_mask( + (__v8hf)__A, (__v8su)_mm256_undefined_si256(), (__mmask8)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttph_epu32(__m256i __W, __mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2udq256_mask((__v8hf)__A, (__v8su)__W, + (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttph_epu32(__mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2udq256_mask( + (__v8hf)__A, (__v8su)_mm256_setzero_si256(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi64_ph(__m128i __A) { + return (__m128h)__builtin_ia32_vcvtqq2ph128_mask( + (__v2di)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_vcvtqq2ph128_mask((__v2di)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_ph(__mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_vcvtqq2ph128_mask( + (__v2di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_ph(__m256i __A) { + return (__m128h)__builtin_ia32_vcvtqq2ph256_mask( + (__v4di)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m256i __A) { + return (__m128h)__builtin_ia32_vcvtqq2ph256_mask((__v4di)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_ph(__mmask8 __U, __m256i __A) { + return (__m128h)__builtin_ia32_vcvtqq2ph256_mask( + (__v4di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi64(__m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2qq128_mask( + (__v8hf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtph_epi64(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2qq128_mask((__v8hf)__A, (__v2di)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2qq128_mask( + (__v8hf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtph_epi64(__m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2qq256_mask( + (__v8hf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtph_epi64(__m256i __W, __mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2qq256_mask((__v8hf)__A, (__v4di)__W, + (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) { + 
return (__m256i)__builtin_ia32_vcvtph2qq256_mask( + (__v8hf)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu64_ph(__m128i __A) { + return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask( + (__v2du)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask((__v2du)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu64_ph(__mmask8 __U, __m128i __A) { + return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask( + (__v2du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_cvtepu64_ph(__m256i __A) { + return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask( + (__v4du)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m256i __A) { + return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask((__v4du)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu64_ph(__mmask8 __U, __m256i __A) { + return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask( + (__v4du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu64(__m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2uqq128_mask( + (__v8hf)__A, (__v2du)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtph_epu64(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2uqq128_mask((__v8hf)__A, (__v2du)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvtph2uqq128_mask( + (__v8hf)__A, (__v2du)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtph_epu64(__m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2uqq256_mask( + (__v8hf)__A, (__v4du)_mm256_undefined_si256(), (__mmask8)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtph_epu64(__m256i __W, __mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2uqq256_mask((__v8hf)__A, (__v4du)__W, + (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvtph2uqq256_mask( + (__v8hf)__A, (__v4du)_mm256_setzero_si256(), (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi64(__m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2qq128_mask( + (__v8hf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttph_epi64(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2qq128_mask((__v8hf)__A, (__v2di)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2qq128_mask( + (__v8hf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttph_epi64(__m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2qq256_mask( + (__v8hf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 
+_mm256_mask_cvttph_epi64(__m256i __W, __mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2qq256_mask((__v8hf)__A, (__v4di)__W, + (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2qq256_mask( + (__v8hf)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu64(__m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2uqq128_mask( + (__v8hf)__A, (__v2du)_mm_undefined_si128(), (__mmask8)-1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttph_epu64(__m128i __W, __mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2uqq128_mask((__v8hf)__A, (__v2du)__W, + (__mmask8)__U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) { + return (__m128i)__builtin_ia32_vcvttph2uqq128_mask( + (__v8hf)__A, (__v2du)_mm_setzero_si128(), (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttph_epu64(__m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2uqq256_mask( + (__v8hf)__A, (__v4du)_mm256_undefined_si256(), (__mmask8)-1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttph_epu64(__m256i __W, __mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2uqq256_mask((__v8hf)__A, (__v4du)__W, + (__mmask8)__U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) { + return (__m256i)__builtin_ia32_vcvttph2uqq256_mask( + (__v8hf)__A, (__v4du)_mm256_setzero_si256(), (__mmask8)__U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtxph_ps(__m128h __A) { + return (__m128)__builtin_ia32_vcvtph2psx128_mask( + (__v8hf)__A, (__v4sf)_mm_undefined_ps(), (__mmask8)-1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtxph_ps(__m128 __W, + __mmask8 __U, + __m128h __A) { + return (__m128)__builtin_ia32_vcvtph2psx128_mask((__v8hf)__A, (__v4sf)__W, + (__mmask8)__U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtxph_ps(__mmask8 __U, __m128h __A) { + return (__m128)__builtin_ia32_vcvtph2psx128_mask( + (__v8hf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtxph_ps(__m128h __A) { + return (__m256)__builtin_ia32_vcvtph2psx256_mask( + (__v8hf)__A, (__v8sf)_mm256_undefined_ps(), (__mmask8)-1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtxph_ps(__m256 __W, __mmask8 __U, __m128h __A) { + return (__m256)__builtin_ia32_vcvtph2psx256_mask((__v8hf)__A, (__v8sf)__W, + (__mmask8)__U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtxph_ps(__mmask8 __U, __m128h __A) { + return (__m256)__builtin_ia32_vcvtph2psx256_mask( + (__v8hf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtxps_ph(__m128 __A) { + return (__m128h)__builtin_ia32_vcvtps2phx128_mask( + (__v4sf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtxps_ph(__m128h __W, + __mmask8 __U, + __m128 __A) { + return (__m128h)__builtin_ia32_vcvtps2phx128_mask((__v4sf)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtxps_ph(__mmask8 __U, __m128 __A) { + return (__m128h)__builtin_ia32_vcvtps2phx128_mask( + (__v4sf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + 
+static __inline__ __m128h __DEFAULT_FN_ATTRS256 _mm256_cvtxps_ph(__m256 __A) { + return (__m128h)__builtin_ia32_vcvtps2phx256_mask( + (__v8sf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtxps_ph(__m128h __W, __mmask8 __U, __m256 __A) { + return (__m128h)__builtin_ia32_vcvtps2phx256_mask((__v8sf)__A, (__v8hf)__W, + (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtxps_ph(__mmask8 __U, __m256 __A) { + return (__m128h)__builtin_ia32_vcvtps2phx256_mask( + (__v8sf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ph(__m128h __A, + __mmask8 __U, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, + -(__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ph(__m128h __A, + __mmask8 __U, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_ph(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmadd_ph(__m256h __A, 
__mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmsub_ph(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, + -(__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmaddsub_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fmaddsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmaddsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static 
__inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsubadd_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, + -(__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fmsubadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsubadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmaddsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmaddsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, + -(__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsubadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsubadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ 
__m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmadd_ph(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, + -(__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmsub_ph(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, + -(__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmul_pch(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfcmulcph128_mask( + (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fcmul_pch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_vfcmulcph128_mask((__v4sf)__A, (__v4sf)__B, + (__v4sf)__W, (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fcmul_pch(__mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_vfcmulcph128_mask( + (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS128 
_mm256_fcmul_pch(__m256h __A, + __m256h __B) { + return (__m256h)__builtin_ia32_vfcmulcph256_mask( + (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fcmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_vfcmulcph256_mask((__v8sf)__A, (__v8sf)__B, + (__v8sf)__W, (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fcmul_pch(__mmask8 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_vfcmulcph256_mask( + (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmadd_pch(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C, (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fcmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectps_128( + __U, + __builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)(__m128h)__B, + (__v4sf)__C, (__mmask8)__U), + (__v4sf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fcmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C, (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fcmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_vfcmaddcph128_maskz( + (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fcmadd_pch(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B, + (__v8sf)__C, (__mmask8)-1); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fcmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectps_256( + __U, + __builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, + (__mmask8)__U), + (__v8sf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fcmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) { + return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B, + (__v8sf)__C, (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fcmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_vfcmaddcph256_maskz( + (__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmul_pch(__m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfmulcph128_mask( + (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmul_pch(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfmulcph128_mask((__v4sf)__A, (__v4sf)__B, + (__v4sf)__W, (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmul_pch(__mmask8 __U, __m128h __A, __m128h __B) { + return (__m128h)__builtin_ia32_vfmulcph128_mask( + (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmul_pch(__m256h __A, + __m256h __B) { + return (__m256h)__builtin_ia32_vfmulcph256_mask( + (__v8sf)__A, (__v8sf)__B, 
(__v8sf)_mm256_undefined_ph(), (__mmask8)-1); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_vfmulcph256_mask((__v8sf)__A, (__v8sf)__B, + (__v8sf)__W, (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmul_pch(__mmask8 __U, __m256h __A, __m256h __B) { + return (__m256h)__builtin_ia32_vfmulcph256_mask( + (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ph(), (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_pch(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C, (__mmask8)-1); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectps_128( + __U, + __builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C, + (__mmask8)__U), + (__v4sf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C, (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddcph128_maskz((__v4sf)__A, (__v4sf)__B, + (__v4sf)__C, (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_pch(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B, + (__v8sf)__C, (__mmask8)-1); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectps_256( + __U, + __builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C, + (__mmask8)__U), + (__v8sf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) { + return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B, + (__v8sf)__C, (__mmask8)__U); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddcph256_maskz((__v8sf)__A, (__v8sf)__B, + (__v8sf)__C, (__mmask8)__U); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_blend_ph(__mmask8 __U, + __m128h __A, + __m128h __W) { + return (__m128h)__builtin_ia32_selectph_128((__mmask8)__U, (__v8hf)__W, + (__v8hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_ph(__mmask16 __U, __m256h __A, __m256h __W) { + return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U, (__v16hf)__W, + (__v16hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_permutex2var_ph(__m128h __A, __m128i __I, __m128h __B) { + return (__m128h)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I, + (__v8hi)__B); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_permutex2var_ph(__m256h __A, __m256i __I, __m256h __B) { + return (__m256h)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I, + (__v16hi)__B); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_permutexvar_ph(__m128i __A, __m128h __B) { + return (__m128h)__builtin_ia32_permvarhi128((__v8hi)__B, (__v8hi)__A); +} + +static __inline__ 
__m256h __DEFAULT_FN_ATTRS256 +_mm256_permutexvar_ph(__m256i __A, __m256h __B) { + return (__m256h)__builtin_ia32_permvarhi256((__v16hi)__B, (__v16hi)__A); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS256 +_mm256_reduce_add_ph(__m256h __W) { + return __builtin_ia32_reduce_fadd_ph256(-0.0f16, __W); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS256 +_mm256_reduce_mul_ph(__m256h __W) { + return __builtin_ia32_reduce_fmul_ph256(1.0f16, __W); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS256 +_mm256_reduce_max_ph(__m256h __V) { + return __builtin_ia32_reduce_fmax_ph256(__V); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS256 +_mm256_reduce_min_ph(__m256h __V) { + return __builtin_ia32_reduce_fmin_ph256(__V); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS128 +_mm_reduce_add_ph(__m128h __W) { + return __builtin_ia32_reduce_fadd_ph128(-0.0f16, __W); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS128 +_mm_reduce_mul_ph(__m128h __W) { + return __builtin_ia32_reduce_fmul_ph128(1.0f16, __W); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS128 +_mm_reduce_max_ph(__m128h __V) { + return __builtin_ia32_reduce_fmax_ph128(__V); +} + +static __inline__ _Float16 __DEFAULT_FN_ATTRS128 +_mm_reduce_min_ph(__m128h __V) { + return __builtin_ia32_reduce_fmin_ph128(__V); +} + +// intrinsics below are alias for f*mul_*ch +#define _mm_mul_pch(A, B) _mm_fmul_pch(A, B) +#define _mm_mask_mul_pch(W, U, A, B) _mm_mask_fmul_pch(W, U, A, B) +#define _mm_maskz_mul_pch(U, A, B) _mm_maskz_fmul_pch(U, A, B) +#define _mm256_mul_pch(A, B) _mm256_fmul_pch(A, B) +#define _mm256_mask_mul_pch(W, U, A, B) _mm256_mask_fmul_pch(W, U, A, B) +#define _mm256_maskz_mul_pch(U, A, B) _mm256_maskz_fmul_pch(U, A, B) + +#define _mm_cmul_pch(A, B) _mm_fcmul_pch(A, B) +#define _mm_mask_cmul_pch(W, U, A, B) _mm_mask_fcmul_pch(W, U, A, B) +#define _mm_maskz_cmul_pch(U, A, B) _mm_maskz_fcmul_pch(U, A, B) +#define _mm256_cmul_pch(A, B) _mm256_fcmul_pch(A, B) +#define _mm256_mask_cmul_pch(W, U, A, B) _mm256_mask_fcmul_pch(W, U, A, B) +#define _mm256_maskz_cmul_pch(U, A, B) _mm256_maskz_fcmul_pch(U, A, B) + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif +#endif diff --git a/third_party/intel/clang/avx512vlintrin.h b/third_party/intel/clang/avx512vlintrin.h new file mode 100644 index 000000000..2a5f7b43f --- /dev/null +++ b/third_party/intel/clang/avx512vlintrin.h @@ -0,0 +1,8437 @@ +/*===---- avx512vlintrin.h - AVX512VL intrinsics ---------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use <avx512vlintrin.h> directly; include <immintrin.h> instead."
+#endif + +#ifndef __AVX512VLINTRIN_H +#define __AVX512VLINTRIN_H + +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,no-evex512"), \ + __min_vector_width__(256))) + +typedef short __v2hi __attribute__((__vector_size__(4))); +typedef char __v4qi __attribute__((__vector_size__(4))); +typedef char __v2qi __attribute__((__vector_size__(2))); + +/* Integer compare */ + +#define _mm_cmpeq_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epi32_mask(A, B) \ + _mm_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epi32_mask(k, A, B) \ + _mm_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epi32_mask(A, B) \ + _mm256_cmp_epi32_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epi32_mask(k, A, B) \ + _mm256_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) +#define 
_mm_mask_cmple_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epu32_mask(A, B) \ + _mm_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epu32_mask(k, A, B) \ + _mm_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epu32_mask(A, B) \ + _mm256_cmp_epu32_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epu32_mask(k, A, B) \ + _mm256_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epi64_mask(A, B) \ + _mm_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epi64_mask(k, A, B) \ + _mm_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), 
(B), _MM_CMPINT_LE) +#define _mm256_cmplt_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epi64_mask(A, B) \ + _mm256_cmp_epi64_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epi64_mask(k, A, B) \ + _mm256_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm_cmpeq_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm_mask_cmpeq_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm_cmpge_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) +#define _mm_mask_cmpge_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm_cmpgt_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) +#define _mm_mask_cmpgt_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm_cmple_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) +#define _mm_mask_cmple_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm_cmplt_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) +#define _mm_mask_cmplt_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm_cmpneq_epu64_mask(A, B) \ + _mm_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) +#define _mm_mask_cmpneq_epu64_mask(k, A, B) \ + _mm_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) + +#define _mm256_cmpeq_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ) +#define _mm256_mask_cmpeq_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ) +#define _mm256_cmpge_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GE) +#define _mm256_mask_cmpge_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE) +#define _mm256_cmpgt_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_GT) +#define _mm256_mask_cmpgt_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT) +#define _mm256_cmple_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LE) +#define _mm256_mask_cmple_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE) +#define _mm256_cmplt_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_LT) +#define _mm256_mask_cmplt_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT) +#define _mm256_cmpneq_epu64_mask(A, B) \ + _mm256_cmp_epu64_mask((A), (B), _MM_CMPINT_NE) +#define _mm256_mask_cmpneq_epu64_mask(k, A, B) \ + _mm256_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_add_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_add_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_add_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 
+_mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_add_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sub_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sub_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sub_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sub_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_add_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_add_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_add_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_add_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sub_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sub_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sub_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sub_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epi32(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y) +{ + return 
(__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epi32(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epi32(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epi32(__X, __Y), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epu32(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_mul_epu32(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epu32(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_mul_epu32(__X, __Y), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_mullo_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_mullo_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_mullo_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_mullo_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_and_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a & (__v8su)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_and_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_and_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_and_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_and_epi32(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_and_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4su)__a & (__v4su)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_and_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + 
(__v4si)_mm_and_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_and_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_and_epi32(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_andnot_epi32(__m256i __A, __m256i __B) +{ + return (__m256i)(~(__v8su)__A & (__v8su)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_andnot_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_andnot_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_andnot_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_andnot_epi32(_mm256_setzero_si256(), + __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_andnot_epi32(__m128i __A, __m128i __B) +{ + return (__m128i)(~(__v4su)__A & (__v4su)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_andnot_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_andnot_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_andnot_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_andnot_epi32(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_or_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a | (__v8su)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_or_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_or_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_or_epi32(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_or_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4su)__a | (__v4su)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_or_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_or_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_or_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_or_epi32(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_xor_epi32(__m256i __a, __m256i __b) +{ + return (__m256i)((__v8su)__a ^ (__v8su)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_xor_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_xor_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_xor_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_xor_epi32(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_xor_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)((__v4su)__a ^ (__v4su)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_xor_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + 
(__v4si)_mm_xor_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_xor_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_xor_epi32(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_and_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a & (__v4du)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_and_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_and_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_and_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_and_epi64(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_and_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)((__v2du)__a & (__v2du)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_and_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_and_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_and_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_and_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_andnot_epi64(__m256i __A, __m256i __B) +{ + return (__m256i)(~(__v4du)__A & (__v4du)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_andnot_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_andnot_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_andnot_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_andnot_epi64(_mm256_setzero_si256(), + __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_andnot_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)(~(__v2du)__A & (__v2du)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_andnot_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_andnot_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_andnot_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_andnot_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_or_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a | (__v4du)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_or_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_or_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_or_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_or_epi64(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_or_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)((__v2du)__a | (__v2du)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_or_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + 
(__v2di)_mm_or_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_or_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_or_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_xor_epi64(__m256i __a, __m256i __b) +{ + return (__m256i)((__v4du)__a ^ (__v4du)__b); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_xor_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_xor_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_xor_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)_mm256_mask_xor_epi64(_mm256_setzero_si256(), __U, __A, __B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_xor_epi64(__m128i __a, __m128i __b) +{ + return (__m128i)((__v2du)__a ^ (__v2du)__b); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_xor_epi64(__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_xor_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)_mm_mask_xor_epi64(_mm_setzero_si128(), __U, __A, __B); +} + +#define _mm_cmp_epi32_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm_mask_cmp_epi32_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm_cmp_epu32_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm_mask_cmp_epu32_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm256_cmp_epi32_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm256_cmp_epu32_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm_cmp_epi64_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm_mask_cmp_epi64_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm_cmp_epu64_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm_mask_cmp_epu64_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm256_cmp_epi64_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), (int)(p), \ + 
(__mmask8)-1)) + +#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm256_cmp_epu64_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm256_cmp_ps_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm256_mask_cmp_ps_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm256_cmp_pd_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm256_mask_cmp_pd_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm_cmp_ps_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm_mask_cmp_ps_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), (int)(p), \ + (__mmask8)(m))) + +#define _mm_cmp_pd_mask(a, b, p) \ + ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (int)(p), \ + (__mmask8)-1)) + +#define _mm_mask_cmp_pd_mask(m, a, b, p) \ + ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), (int)(p), \ + (__mmask8)(m))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd (-(__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __C); +} + 
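The hunk above supplies both the select-based masked intrinsics (merge masking keeps lanes whose mask bit is clear from the passthrough operand; the maskz forms zero them) and the _mm*_cmp_ep*_mask macros that reduce a lane-wise compare to a __mmask8. A minimal usage sketch follows; it is illustrative only and not part of the patch, the demo_* helpers are hypothetical, and it assumes a compiler targeting AVX-512F/VL (e.g. -mavx512f -mavx512vl) with the _MM_CMPINT_* predicates from the AVX-512F header available.

#include <immintrin.h>

/* Merge-masked XOR: lanes 0 and 2 (mask 0x5) take a ^ b, while lanes 1 and 3
   keep the corresponding lane of src (the passthrough operand). */
static __m128i demo_masked_xor(__m128i src, __m128i a, __m128i b) {
  return _mm_mask_xor_epi32(src, 0x5, a, b);
}

/* Lane-wise signed compare: one mask bit per 32-bit lane, set where a < b. */
static __mmask8 demo_lt_mask(__m256i a, __m256i b) {
  return _mm256_cmp_epi32_mask(a, b, _MM_CMPINT_LT);
}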
+static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd (-(__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd (-(__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 (-(__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 (-(__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 (-(__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __C); +} + +static 
__inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps (-(__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps (-(__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps (-(__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 (-(__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmadd_ps(__mmask8 __U, 
__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 (-(__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 (-(__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmaddsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmaddsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + (__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fmsubadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsubadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fmaddsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmaddsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + (__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsubadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsubadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 
+_mm_mask_fmaddsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmaddsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fmsubadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsubadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fmaddsub_ps(__m256 __A, __mmask8 __U, __m256 __B, + __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmaddsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsubadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_ps(__m128 __A, __m128 
__B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddsubpd ((__v2df) __A, + (__v2df) __B, + -(__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddsubpd256 ((__v4df) __A, + (__v4df) __B, + -(__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddsubps ((__v4sf) __A, + (__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddsubps256 ((__v8sf) __A, + (__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + -(__v2df) __B, + (__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + -(__v4df) __B, + (__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + -(__v4sf) __B, + (__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + -(__v8sf) __B, + (__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + -(__v2df) __B, + -(__v2df) __C), + (__v2df) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U) +{ + return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U, + __builtin_ia32_vfmaddpd ((__v2df) __A, + -(__v2df) __B, + -(__v2df) __C), + (__v2df) __C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + 
__builtin_ia32_vfmaddpd256 ((__v4df) __A, + -(__v4df) __B, + -(__v4df) __C), + (__v4df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U) +{ + return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U, + __builtin_ia32_vfmaddpd256 ((__v4df) __A, + -(__v4df) __B, + -(__v4df) __C), + (__v4df) __C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + -(__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U) +{ + return (__m128) __builtin_ia32_selectps_128((__mmask8) __U, + __builtin_ia32_vfmaddps ((__v4sf) __A, + -(__v4sf) __B, + -(__v4sf) __C), + (__v4sf) __C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + -(__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) +{ + return (__m256) __builtin_ia32_selectps_256((__mmask8) __U, + __builtin_ia32_vfmaddps256 ((__v8sf) __A, + -(__v8sf) __B, + -(__v8sf) __C), + (__v8sf) __C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_add_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_add_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_add_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_add_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_add_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_add_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_add_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_add_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) { + return 
(__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U, + (__v4si) __W, + (__v4si) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) { + return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U, + (__v8si) __W, + (__v8si) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) { + return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, + (__v2df) __W, + (__v2df) __A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) { + return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, + (__v4df) __W, + (__v4df) __A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) { + return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U, + (__v4sf) __W, + (__v4sf) __A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) { + return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, + (__v8sf) __W, + (__v8sf) __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) { + return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U, + (__v2di) __W, + (__v2di) __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) { + return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U, + (__v4di) __W, + (__v4di) __A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_compress_pd (__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_pd (__mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_compressdf128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_pd (__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_pd (__mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_compressdf256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_compress_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_epi64 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compressdi128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_epi64 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compressdi256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_compress_ps (__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128) 
__builtin_ia32_compresssf128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_ps (__mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_compresssf128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_ps (__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_ps (__mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_compresssf256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_compress_epi32 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_epi32 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_compresssi128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_epi32 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_epi32 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_compresssi256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m128d __A) { + __builtin_ia32_compressstoredf128_mask ((__v2df *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m256d __A) { + __builtin_ia32_compressstoredf256_mask ((__v4df *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m128i __A) { + __builtin_ia32_compressstoredi128_mask ((__v2di *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m256i __A) { + __builtin_ia32_compressstoredi256_mask ((__v4di *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m128 __A) { + __builtin_ia32_compressstoresf128_mask ((__v4sf *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_ps (void *__P, __mmask8 __U, __m256 __A) { + __builtin_ia32_compressstoresf256_mask ((__v8sf *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m128i __A) { + __builtin_ia32_compressstoresi128_mask ((__v4si *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) { + __builtin_ia32_compressstoresi256_mask ((__v8si *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return 
(__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepi32_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepi32_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepi32_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepi32_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_cvtepi32_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi32_ps (__mmask8 __U, __m128i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_cvtepi32_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_ps (__m256 __W, __mmask8 __U, __m256i __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_cvtepi32_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi32_ps (__mmask8 __U, __m256i __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_cvtepi32_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_epi32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2dq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm256_cvtpd_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_epi32 (__mmask8 __U, __m256d __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm256_cvtpd_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m128d __A) { + return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_ps (__mmask8 __U, __m128d __A) { + return (__m128) __builtin_ia32_cvtpd2ps_mask ((__v2df) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_ps (__m128 __W, __mmask8 __U, __m256d __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtpd_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_ps (__mmask8 __U, __m256d __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm256_cvtpd_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS128 +_mm_cvtpd_epu32 (__m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtpd_epu32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtpd_epu32 (__m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtpd_epu32 (__mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvtpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtps_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_epi32 (__mmask8 __U, __m128 __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtps_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtps_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_epi32 (__mmask8 __U, __m256 __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtps_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_pd (__m128d __W, __mmask8 __U, __m128 __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtps_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_pd (__mmask8 __U, __m128 __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_cvtps_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_pd (__m256d __W, __mmask8 __U, __m128 __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtps_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_pd (__mmask8 __U, __m128 __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_cvtps_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtps_epu32 (__m128 __A) { + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + 
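The conversion block above follows the same masking pattern: the 256-bit cvtpd forms narrow four doubles into a 128-bit integer vector, and the mask/maskz variants select between the converted lanes and the passthrough (or zero). A short zero-masking sketch, illustrative only and not part of the patch; the demo_cvt helper is hypothetical and the same AVX-512F/VL target flags are assumed.

#include <immintrin.h>

/* Zero-masked narrowing conversion: four doubles -> four int32 lanes;
   lanes whose bit in `keep` is clear come out as zero. */
static __m128i demo_cvt(__mmask8 keep, __m256d v) {
  return _mm256_maskz_cvtpd_epi32(keep, v);
}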
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtps_epu32 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvtps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvtps_epu32 (__m256 __A) { + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtps_epu32 (__mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvtps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttpd_epi32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2dq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttpd_epi32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm256_cvttpd_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttpd_epi32 (__mmask8 __U, __m256d __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm256_cvttpd_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttpd_epu32 (__m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttpd_epu32 (__mmask8 __U, __m128d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq128_mask ((__v2df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvttpd_epu32 (__m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttpd_epu32 (__m128i __W, __mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttpd_epu32 (__mmask8 __U, __m256d __A) { + return (__m128i) __builtin_ia32_cvttpd2udq256_mask ((__v4df) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttps_epi32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvttps_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttps_epi32 (__mmask8 __U, __m128 __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + 
(__v4si)_mm_cvttps_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttps_epi32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvttps_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttps_epi32 (__mmask8 __U, __m256 __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvttps_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvttps_epu32 (__m128 __A) { + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttps_epu32 (__m128i __W, __mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttps_epu32 (__mmask8 __U, __m128 __A) { + return (__m128i) __builtin_ia32_cvttps2udq128_mask ((__v4sf) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttps_epu32 (__m256 __A) { + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) -1); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttps_epu32 (__m256i __W, __mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) { + return (__m256i) __builtin_ia32_cvttps2udq256_mask ((__v8sf) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_cvtepu32_pd (__m128i __A) { + return (__m128d) __builtin_convertvector( + __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepu32_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8) __U, + (__v2df)_mm_cvtepu32_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_cvtepu32_pd (__m128i __A) { + return (__m256d)__builtin_convertvector((__v4su)__A, __v4df); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepu32_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8) __U, + (__v4df)_mm256_cvtepu32_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtepu32_ps (__m128i __A) { + return (__m128)__builtin_convertvector((__v4su)__A, __v4sf); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepu32_ps (__m128 __W, __mmask8 __U, __m128i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_cvtepu32_ps(__A), + (__v4sf)__W); +} + +static __inline__ 
__m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepu32_ps (__mmask8 __U, __m128i __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_cvtepu32_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtepu32_ps (__m256i __A) { + return (__m256)__builtin_convertvector((__v8su)__A, __v8sf); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepu32_ps (__m256 __W, __mmask8 __U, __m256i __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_cvtepu32_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_cvtepu32_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_div_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_div_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_div_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_div_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_div_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_div_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_div_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_div_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_expand_pd (__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_pd (__mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_expanddf128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_pd (__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_pd (__mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_expanddf256_mask ((__v4df) __A, + 
(__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expand_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_epi64 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expanddi128_mask ((__v2di) __A, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_epi64 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expanddi256_mask ((__v4di) __A, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_pd (__m128d __W, __mmask8 __U, void const *__P) { + return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P, + (__v2df) __W, + (__mmask8) + __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_pd (__mmask8 __U, void const *__P) { + return (__m128d) __builtin_ia32_expandloaddf128_mask ((const __v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) + __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_pd (__m256d __W, __mmask8 __U, void const *__P) { + return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P, + (__v4df) __W, + (__mmask8) + __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_pd (__mmask8 __U, void const *__P) { + return (__m256d) __builtin_ia32_expandloaddf256_mask ((const __v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P, + (__v2di) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloaddi128_mask ((const __v2di *) __P, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_epi64 (__m256i __W, __mmask8 __U, + void const *__P) { + return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P, + (__v4di) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_epi64 (__mmask8 __U, void const *__P) { + return (__m256i) __builtin_ia32_expandloaddi256_mask ((const __v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_ps (__m128 __W, __mmask8 __U, void const *__P) { + return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_ps (__mmask8 __U, void const *__P) { + return (__m128) __builtin_ia32_expandloadsf128_mask ((const __v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) + __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_ps (__m256 __W, __mmask8 __U, void const *__P) { + 
return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_ps (__mmask8 __U, void const *__P) { + return (__m256) __builtin_ia32_expandloadsf256_mask ((const __v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P, + (__v4si) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) { + return (__m128i) __builtin_ia32_expandloadsi128_mask ((const __v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_epi32 (__m256i __W, __mmask8 __U, + void const *__P) { + return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P, + (__v8si) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_epi32 (__mmask8 __U, void const *__P) { + return (__m256i) __builtin_ia32_expandloadsi256_mask ((const __v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_expand_ps (__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_ps (__mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_expandsf128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_ps (__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_ps (__mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_expandsf256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expand_epi32 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_epi32 (__mmask8 __U, __m128i __A) { + return (__m128i) __builtin_ia32_expandsi128_mask ((__v4si) __A, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_epi32 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_epi32 (__mmask8 __U, __m256i __A) { + return (__m256i) __builtin_ia32_expandsi256_mask ((__v8si) __A, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_getexp_pd (__m128d __A) { + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_pd (__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) 
__U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_pd (__mmask8 __U, __m128d __A) { + return (__m128d) __builtin_ia32_getexppd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_getexp_pd (__m256d __A) { + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_getexp_pd (__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_getexp_pd (__mmask8 __U, __m256d __A) { + return (__m256d) __builtin_ia32_getexppd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_getexp_ps (__m128 __A) { + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_getexp_ps (__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_getexp_ps (__mmask8 __U, __m128 __A) { + return (__m128) __builtin_ia32_getexpps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_getexp_ps (__m256 __A) { + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_getexp_ps (__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) { + return (__m256) __builtin_ia32_getexpps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_max_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_max_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_max_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_max_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_max_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_max_ps(__A, __B), + 
(__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_max_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_max_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_min_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_min_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_min_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_min_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_min_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_min_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_min_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_min_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_mul_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_mul_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_mul_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_mul_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + 
(__v4sf)_mm_mul_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_mul_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_mul_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_mul_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_abs_epi32(__A), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_abs_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_abs_epi32(__A), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_abs_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_abs_epi64 (__m128i __A) { + return (__m128i)__builtin_elementwise_abs((__v2di)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_abs_epi64 (__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_abs_epi64(__A), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_abs_epi64(__A), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_abs_epi64 (__m256i __A) { + return (__m256i)__builtin_elementwise_abs((__v4di)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_abs_epi64 (__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_abs_epi64(__A), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_abs_epi64(__A), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + 
(__v8si)_mm256_max_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_max_epi64 (__m128i __A, __m128i __B) { + return (__m128i)__builtin_elementwise_max((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epi64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_max_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_max_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epi64 (__m256i __A, __m256i __B) { + return (__m256i)__builtin_elementwise_max((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epi64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_max_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_max_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epu32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_max_epu32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epu32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_max_epu32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_max_epu64 (__m128i __A, __m128i __B) { + return (__m128i)__builtin_elementwise_max((__v2du)__A, (__v2du)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_max_epu64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_max_epu64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_max_epu64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_max_epu64 (__m256i __A, __m256i __B) { + return (__m256i)__builtin_elementwise_max((__v4du)__A, (__v4du)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_max_epu64 (__mmask8 __M, 
__m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_max_epu64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_max_epu64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_min_epi64 (__m128i __A, __m128i __B) { + return (__m128i)__builtin_elementwise_min((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_min_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_min_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epi64 (__m256i __A, __m256i __B) { + return (__m256i)__builtin_elementwise_min((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epi64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_min_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_min_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epu32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm_min_epu32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epu32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epu32(__m256i __W, __mmask8 
__M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_min_epu32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_min_epu64 (__m128i __A, __m128i __B) { + return (__m128i)__builtin_elementwise_min((__v2du)__A, (__v2du)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_min_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_min_epu64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M, + (__v2di)_mm_min_epu64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_min_epu64 (__m256i __A, __m256i __B) { + return (__m256i)__builtin_elementwise_min((__v4du)__A, (__v4du)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_min_epu64 (__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_min_epu64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_min_epu64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +#define _mm_roundscale_pd(A, imm) \ + ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1)) + + +#define _mm_mask_roundscale_pd(W, U, A, imm) \ + ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \ + (int)(imm), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U))) + + +#define _mm_maskz_roundscale_pd(U, A, imm) \ + ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \ + (int)(imm), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U))) + + +#define _mm256_roundscale_pd(A, imm) \ + ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1)) + + +#define _mm256_mask_roundscale_pd(W, U, A, imm) \ + ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U))) + + +#define _mm256_maskz_roundscale_pd(U, A, imm) \ + ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \ + (int)(imm), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm_roundscale_ps(A, imm) \ + ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1)) + + +#define _mm_mask_roundscale_ps(W, U, A, imm) \ + ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U))) + + +#define _mm_maskz_roundscale_ps(U, A, imm) \ + ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U))) + +#define _mm256_roundscale_ps(A, imm) \ + ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1)) + +#define _mm256_mask_roundscale_ps(W, U, A, imm) \ + ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U))) + + +#define _mm256_maskz_roundscale_ps(U, A, imm) \ + 
((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_scalef_pd (__m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_pd (__m128d __W, __mmask8 __U, __m128d __A, + __m128d __B) { + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_pd (__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_scalefpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_scalef_pd (__m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_scalef_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) { + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_scalef_pd (__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_scalefpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_scalef_ps (__m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_scalef_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_scalef_ps (__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_scalefps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_scalef_ps (__m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_scalef_ps (__m256 __W, __mmask8 __U, __m256 __A, + __m256 __B) { + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_scalefps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +#define _mm_i64scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)) + +#define _mm_mask_i64scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv2df((void *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)) + +#define _mm_i64scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv2di((void *)(addr), 
(__mmask8)-1, \ + (__v2di)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)) + +#define _mm_mask_i64scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv2di((void *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)) + +#define _mm256_i64scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)) + +#define _mm256_mask_i64scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv4df((void *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)) + +#define _mm256_i64scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)) + +#define _mm256_mask_i64scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv4di((void *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)) + +#define _mm_i64scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm_mask_i64scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv4sf((void *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm_i64scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)-1, \ + (__v2di)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm_mask_i64scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv4si((void *)(addr), (__mmask8)(mask), \ + (__v2di)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm256_i64scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm256_mask_i64scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv8sf((void *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm256_i64scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)-1, \ + (__v4di)(__m256i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm256_mask_i64scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scatterdiv8si((void *)(addr), (__mmask8)(mask), \ + (__v4di)(__m256i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm_i32scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)) + +#define _mm_mask_i32scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv2df((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v2df)(__m128d)(v1), (int)(scale)) + +#define _mm_i32scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)) + +#define _mm_mask_i32scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv2di((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v2di)(__m128i)(v1), (int)(scale)) + +#define _mm256_i32scatter_pd(addr, index, v1, scale) \ + __builtin_ia32_scattersiv4df((void *)(addr), 
(__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)) + +#define _mm256_mask_i32scatter_pd(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv4df((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v4df)(__m256d)(v1), (int)(scale)) + +#define _mm256_i32scatter_epi64(addr, index, v1, scale) \ + __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)) + +#define _mm256_mask_i32scatter_epi64(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv4di((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v4di)(__m256i)(v1), (int)(scale)) + +#define _mm_i32scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm_mask_i32scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv4sf((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), (__v4sf)(__m128)(v1), \ + (int)(scale)) + +#define _mm_i32scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)-1, \ + (__v4si)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm_mask_i32scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv4si((void *)(addr), (__mmask8)(mask), \ + (__v4si)(__m128i)(index), \ + (__v4si)(__m128i)(v1), (int)(scale)) + +#define _mm256_i32scatter_ps(addr, index, v1, scale) \ + __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \ + (int)(scale)) + +#define _mm256_mask_i32scatter_ps(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv8sf((void *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), (__v8sf)(__m256)(v1), \ + (int)(scale)) + +#define _mm256_i32scatter_epi32(addr, index, v1, scale) \ + __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)-1, \ + (__v8si)(__m256i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)) + +#define _mm256_mask_i32scatter_epi32(addr, mask, index, v1, scale) \ + __builtin_ia32_scattersiv8si((void *)(addr), (__mmask8)(mask), \ + (__v8si)(__m256i)(index), \ + (__v8si)(__m256i)(v1), (int)(scale)) + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sqrt_pd(__A), + (__v2df)__W); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sqrt_pd(__A), + (__v2df)_mm_setzero_pd()); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sqrt_pd(__A), + (__v4df)__W); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sqrt_pd(__A), + (__v4df)_mm256_setzero_pd()); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sqrt_ps(__A), + (__v4sf)__W); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + 
(__v4sf)_mm_sqrt_ps(__A), + (__v4sf)_mm_setzero_ps()); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sqrt_ps(__A), + (__v8sf)__W); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sqrt_ps(__A), + (__v8sf)_mm256_setzero_ps()); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sub_pd(__A, __B), + (__v2df)__W); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sub_pd(__A, __B), + (__v2df)_mm_setzero_pd()); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sub_pd(__A, __B), + (__v4df)__W); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sub_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sub_ps(__A, __B), + (__v4sf)__W); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sub_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sub_ps(__A, __B), + (__v8sf)__W); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sub_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_permutex2var_epi32(__m128i __A, __m128i __I, __m128i __B) { + return (__m128i)__builtin_ia32_vpermi2vard128((__v4si) __A, (__v4si)__I, + (__v4si)__B); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_permutex2var_epi32(__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_permutex2var_epi32(__A, __I, __B), + (__v4si)__A); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_permutex2var_epi32(__A, __I, __B), + (__v4si)__I); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_permutex2var_epi32(__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) { + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_permutex2var_epi32(__A, __I, __B), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_permutex2var_epi32(__m256i __A, __m256i __I, __m256i 
__B) { + return (__m256i)__builtin_ia32_vpermi2vard256((__v8si)__A, (__v8si) __I, + (__v8si) __B); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_permutex2var_epi32(__m256i __A, __mmask8 __U, __m256i __I, + __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_permutex2var_epi32(__A, __I, __B), + (__v8si)__A); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask2_permutex2var_epi32(__m256i __A, __m256i __I, __mmask8 __U, + __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_permutex2var_epi32(__A, __I, __B), + (__v8si)__I); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_permutex2var_epi32(__mmask8 __U, __m256i __A, __m256i __I, + __m256i __B) { + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_permutex2var_epi32(__A, __I, __B), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_permutex2var_pd(__m128d __A, __m128i __I, __m128d __B) { + return (__m128d)__builtin_ia32_vpermi2varpd128((__v2df)__A, (__v2di)__I, + (__v2df)__B); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_mask_permutex2var_pd(__m128d __A, __mmask8 __U, __m128i __I, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128(__U, + (__v2df)_mm_permutex2var_pd(__A, __I, __B), + (__v2df)__A); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_mask2_permutex2var_pd(__m128d __A, __m128i __I, __mmask8 __U, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128(__U, + (__v2df)_mm_permutex2var_pd(__A, __I, __B), + (__v2df)(__m128d)__I); + } + + static __inline__ __m128d __DEFAULT_FN_ATTRS128 + _mm_maskz_permutex2var_pd(__mmask8 __U, __m128d __A, __m128i __I, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128(__U, + (__v2df)_mm_permutex2var_pd(__A, __I, __B), + (__v2df)_mm_setzero_pd()); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_permutex2var_pd(__m256d __A, __m256i __I, __m256d __B) { + return (__m256d)__builtin_ia32_vpermi2varpd256((__v4df)__A, (__v4di)__I, + (__v4df)__B); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_mask_permutex2var_pd(__m256d __A, __mmask8 __U, __m256i __I, + __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256(__U, + (__v4df)_mm256_permutex2var_pd(__A, __I, __B), + (__v4df)__A); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_mask2_permutex2var_pd(__m256d __A, __m256i __I, __mmask8 __U, + __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256(__U, + (__v4df)_mm256_permutex2var_pd(__A, __I, __B), + (__v4df)(__m256d)__I); + } + + static __inline__ __m256d __DEFAULT_FN_ATTRS256 + _mm256_maskz_permutex2var_pd(__mmask8 __U, __m256d __A, __m256i __I, + __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256(__U, + (__v4df)_mm256_permutex2var_pd(__A, __I, __B), + (__v4df)_mm256_setzero_pd()); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_permutex2var_ps(__m128 __A, __m128i __I, __m128 __B) { + return (__m128)__builtin_ia32_vpermi2varps128((__v4sf)__A, (__v4si)__I, + (__v4sf)__B); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_mask_permutex2var_ps(__m128 __A, __mmask8 __U, __m128i __I, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128(__U, + (__v4sf)_mm_permutex2var_ps(__A, __I, __B), + (__v4sf)__A); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_mask2_permutex2var_ps(__m128 __A, __m128i __I, __mmask8 __U, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128(__U, 
+ (__v4sf)_mm_permutex2var_ps(__A, __I, __B), + (__v4sf)(__m128)__I); + } + + static __inline__ __m128 __DEFAULT_FN_ATTRS128 + _mm_maskz_permutex2var_ps(__mmask8 __U, __m128 __A, __m128i __I, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128(__U, + (__v4sf)_mm_permutex2var_ps(__A, __I, __B), + (__v4sf)_mm_setzero_ps()); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_permutex2var_ps(__m256 __A, __m256i __I, __m256 __B) { + return (__m256)__builtin_ia32_vpermi2varps256((__v8sf)__A, (__v8si)__I, + (__v8sf) __B); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_mask_permutex2var_ps(__m256 __A, __mmask8 __U, __m256i __I, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256(__U, + (__v8sf)_mm256_permutex2var_ps(__A, __I, __B), + (__v8sf)__A); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_mask2_permutex2var_ps(__m256 __A, __m256i __I, __mmask8 __U, + __m256 __B) { + return (__m256)__builtin_ia32_selectps_256(__U, + (__v8sf)_mm256_permutex2var_ps(__A, __I, __B), + (__v8sf)(__m256)__I); + } + + static __inline__ __m256 __DEFAULT_FN_ATTRS256 + _mm256_maskz_permutex2var_ps(__mmask8 __U, __m256 __A, __m256i __I, + __m256 __B) { + return (__m256)__builtin_ia32_selectps_256(__U, + (__v8sf)_mm256_permutex2var_ps(__A, __I, __B), + (__v8sf)_mm256_setzero_ps()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_permutex2var_epi64(__m128i __A, __m128i __I, __m128i __B) { + return (__m128i)__builtin_ia32_vpermi2varq128((__v2di)__A, (__v2di)__I, + (__v2di)__B); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_permutex2var_epi64(__m128i __A, __mmask8 __U, __m128i __I, + __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_permutex2var_epi64(__A, __I, __B), + (__v2di)__A); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask2_permutex2var_epi64(__m128i __A, __m128i __I, __mmask8 __U, + __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_permutex2var_epi64(__A, __I, __B), + (__v2di)__I); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_permutex2var_epi64(__mmask8 __U, __m128i __A, __m128i __I, + __m128i __B) { + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_permutex2var_epi64(__A, __I, __B), + (__v2di)_mm_setzero_si128()); + } + + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_permutex2var_epi64(__m256i __A, __m256i __I, __m256i __B) { + return (__m256i)__builtin_ia32_vpermi2varq256((__v4di)__A, (__v4di) __I, + (__v4di) __B); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_permutex2var_epi64(__m256i __A, __mmask8 __U, __m256i __I, + __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_permutex2var_epi64(__A, __I, __B), + (__v4di)__A); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask2_permutex2var_epi64(__m256i __A, __m256i __I, __mmask8 __U, + __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_permutex2var_epi64(__A, __I, __B), + (__v4di)__I); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_permutex2var_epi64(__mmask8 __U, __m256i __A, __m256i __I, + __m256i __B) { + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_permutex2var_epi64(__A, __I, __B), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + 
(__v4si)_mm_cvtepi8_epi32(__A), + (__v4si)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi8_epi32(__A), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi8_epi32(__A), + (__v8si)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi8_epi32(__A), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi8_epi64(__A), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi8_epi64(__A), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi8_epi64(__A), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi8_epi64(__A), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi32_epi64(__X), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi32_epi64(__X), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi32_epi64(__X), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi32_epi64(__X), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi16_epi32(__A), + (__v4si)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepi16_epi32(__A), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi16_epi32(__A), + (__v8si)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A) + { + 
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepi16_epi32(__A), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi16_epi64(__A), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepi16_epi64(__A), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi16_epi64(__A), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepi16_epi64(__A), + (__v4di)_mm256_setzero_si256()); + } + + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu8_epi32(__A), + (__v4si)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu8_epi32(__A), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu8_epi32(__A), + (__v8si)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu8_epi32(__A), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu8_epi64(__A), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu8_epi64(__A), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu8_epi64(__A), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu8_epi64(__A), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu32_epi64(__X), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu32_epi64(__X), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i 
__DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu32_epi64(__X), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu32_epi64(__X), + (__v4di)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu16_epi32(__A), + (__v4si)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_cvtepu16_epi32(__A), + (__v4si)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu16_epi32(__A), + (__v8si)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_cvtepu16_epi32(__A), + (__v8si)_mm256_setzero_si256()); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu16_epi64(__A), + (__v2di)__W); + } + + static __inline__ __m128i __DEFAULT_FN_ATTRS128 + _mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) + { + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_cvtepu16_epi64(__A), + (__v2di)_mm_setzero_si128()); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu16_epi64(__A), + (__v4di)__W); + } + + static __inline__ __m256i __DEFAULT_FN_ATTRS256 + _mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A) + { + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_cvtepu16_epi64(__A), + (__v4di)_mm256_setzero_si256()); + } + + +#define _mm_rol_epi32(a, b) \ + ((__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b))) + +#define _mm_mask_rol_epi32(w, u, a, b) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \ + (__v4si)_mm_rol_epi32((a), (b)), \ + (__v4si)(__m128i)(w))) + +#define _mm_maskz_rol_epi32(u, a, b) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \ + (__v4si)_mm_rol_epi32((a), (b)), \ + (__v4si)_mm_setzero_si128())) + +#define _mm256_rol_epi32(a, b) \ + ((__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b))) + +#define _mm256_mask_rol_epi32(w, u, a, b) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \ + (__v8si)_mm256_rol_epi32((a), (b)), \ + (__v8si)(__m256i)(w))) + +#define _mm256_maskz_rol_epi32(u, a, b) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \ + (__v8si)_mm256_rol_epi32((a), (b)), \ + (__v8si)_mm256_setzero_si256())) + +#define _mm_rol_epi64(a, b) \ + ((__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b))) + +#define _mm_mask_rol_epi64(w, u, a, b) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \ + (__v2di)_mm_rol_epi64((a), (b)), \ + (__v2di)(__m128i)(w))) + 
+#define _mm_maskz_rol_epi64(u, a, b) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \ + (__v2di)_mm_rol_epi64((a), (b)), \ + (__v2di)_mm_setzero_si128())) + +#define _mm256_rol_epi64(a, b) \ + ((__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b))) + +#define _mm256_mask_rol_epi64(w, u, a, b) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \ + (__v4di)_mm256_rol_epi64((a), (b)), \ + (__v4di)(__m256i)(w))) + +#define _mm256_maskz_rol_epi64(u, a, b) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \ + (__v4di)_mm256_rol_epi64((a), (b)), \ + (__v4di)_mm256_setzero_si256())) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_rolv_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_prolvd128((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_rolv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_rolv_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_rolv_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_rolv_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_rolv_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_prolvd256((__v8si)__A, (__v8si)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_rolv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_rolv_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_rolv_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_rolv_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_rolv_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_prolvq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_rolv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_rolv_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_rolv_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_rolv_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_rolv_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_prolvq256((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_rolv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_rolv_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_rolv_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +#define _mm_ror_epi32(a, b) \ + ((__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b))) + +#define _mm_mask_ror_epi32(w, u, a, b) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \ + (__v4si)_mm_ror_epi32((a), (b)), \ + (__v4si)(__m128i)(w))) + +#define _mm_maskz_ror_epi32(u, a, b) \ + 
((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \ + (__v4si)_mm_ror_epi32((a), (b)), \ + (__v4si)_mm_setzero_si128())) + +#define _mm256_ror_epi32(a, b) \ + ((__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b))) + +#define _mm256_mask_ror_epi32(w, u, a, b) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \ + (__v8si)_mm256_ror_epi32((a), (b)), \ + (__v8si)(__m256i)(w))) + +#define _mm256_maskz_ror_epi32(u, a, b) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \ + (__v8si)_mm256_ror_epi32((a), (b)), \ + (__v8si)_mm256_setzero_si256())) + +#define _mm_ror_epi64(a, b) \ + ((__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b))) + +#define _mm_mask_ror_epi64(w, u, a, b) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \ + (__v2di)_mm_ror_epi64((a), (b)), \ + (__v2di)(__m128i)(w))) + +#define _mm_maskz_ror_epi64(u, a, b) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \ + (__v2di)_mm_ror_epi64((a), (b)), \ + (__v2di)_mm_setzero_si128())) + +#define _mm256_ror_epi64(a, b) \ + ((__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b))) + +#define _mm256_mask_ror_epi64(w, u, a, b) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \ + (__v4di)_mm256_ror_epi64((a), (b)), \ + (__v4di)(__m256i)(w))) + +#define _mm256_maskz_ror_epi64(u, a, b) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \ + (__v4di)_mm256_ror_epi64((a), (b)), \ + (__v4di)_mm256_setzero_si256())) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sll_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sll_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sll_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sll_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_slli_epi32(__A, (int)__B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_slli_epi32(__A, (int)__B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_slli_epi32(__A, (int)__B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_slli_epi32(__A, (int)__B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sll_epi64(__m128i 
__W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sll_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sll_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sll_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sll_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_slli_epi64(__A, (int)__B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_slli_epi64(__A, (int)__B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_slli_epi64(__A, (int)__B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_slli_epi64(__A, (int)__B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_rorv_epi32 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_prorvd128((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_rorv_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_rorv_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_rorv_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_rorv_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_rorv_epi32 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_prorvd256((__v8si)__A, (__v8si)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_rorv_epi32 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_rorv_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_rorv_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_rorv_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_rorv_epi64 (__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_prorvq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_rorv_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return 
(__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_rorv_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_rorv_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_rorv_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_rorv_epi64 (__m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_prorvq256((__v4di)__A, (__v4di)__B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_rorv_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_rorv_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_rorv_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sllv_epi64(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_sllv_epi64(__X, __Y), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sllv_epi64(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_sllv_epi64(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sllv_epi32(__X, __Y), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sllv_epi32(__X, __Y), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sllv_epi32(__X, __Y), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sllv_epi32(__X, __Y), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srlv_epi64(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srlv_epi64(__X, __Y), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 
+_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srlv_epi64(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srlv_epi64(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srlv_epi32(__X, __Y), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srlv_epi32(__X, __Y), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srlv_epi32(__X, __Y), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srlv_epi32(__X, __Y), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srl_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srl_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srl_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srl_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srli_epi32(__A, (int)__B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srli_epi32(__A, (int)__B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srli_epi32(__A, (int)__B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srli_epi32(__A, (int)__B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srl_epi64(__m128i __W, 
__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srl_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srl_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srl_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srl_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srli_epi64(__A, (int)__B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srli_epi64(__A, (int)__B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srli_epi64(__A, (int)__B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srli_epi64(__A, (int)__B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srav_epi32(__X, __Y), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srav_epi32(__X, __Y), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srav_epi32(__X, __Y), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srav_epi32(__X, __Y), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srav_epi64(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srav_epi64(__X, __Y), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_srav_epi64(__X, 
__Y), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srav_epi64(__m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di) __Y); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srav_epi64(__X, __Y), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_srav_epi64(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U, + (__v4si) __A, + (__v4si) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U, + (__v4si) __A, + (__v4si) _mm_setzero_si128 ()); +} + + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U, + (__v8si) __A, + (__v8si) __W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U, + (__v8si) __A, + (__v8si) _mm256_setzero_si256 ()); +} + +static __inline __m128i __DEFAULT_FN_ATTRS128 +_mm_load_epi32 (void const *__P) +{ + return *(const __m128i *) __P; +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P, + (__v4si) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_load_epi32 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa32load128_mask ((const __v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +static __inline __m256i __DEFAULT_FN_ATTRS256 +_mm256_load_epi32 (void const *__P) +{ + return *(const __m256i *) __P; +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P, + (__v8si) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa32load256_mask ((const __v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +static __inline void __DEFAULT_FN_ATTRS128 +_mm_store_epi32 (void *__P, __m128i __A) +{ + *(__m128i *) __P = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_movdqa32store128_mask ((__v4si *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS256 +_mm256_store_epi32 (void *__P, __m256i __A) +{ + *(__m256i *) __P = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_store_epi32 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_movdqa32store256_mask ((__v8si *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_mov_epi64 
(__m128i __W, __mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U, + (__v2di) __A, + (__v2di) __W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A) +{ + return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U, + (__v2di) __A, + (__v2di) _mm_setzero_si128 ()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U, + (__v4di) __A, + (__v4di) __W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A) +{ + return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U, + (__v4di) __A, + (__v4di) _mm256_setzero_si256 ()); +} + +static __inline __m128i __DEFAULT_FN_ATTRS128 +_mm_load_epi64 (void const *__P) +{ + return *(const __m128i *) __P; +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_load_epi64 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P, + (__v2di) __W, + (__mmask8) + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_movdqa64load128_mask ((const __v2di *) __P, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) + __U); +} + +static __inline __m256i __DEFAULT_FN_ATTRS256 +_mm256_load_epi64 (void const *__P) +{ + return *(const __m256i *) __P; +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_load_epi64 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P, + (__v4di) __W, + (__mmask8) + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_load_epi64 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_movdqa64load256_mask ((const __v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) + __U); +} + +static __inline void __DEFAULT_FN_ATTRS128 +_mm_store_epi64 (void *__P, __m128i __A) +{ + *(__m128i *) __P = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_store_epi64 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_movdqa64store128_mask ((__v2di *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS256 +_mm256_store_epi64 (void *__P, __m256i __A) +{ + *(__m256i *) __P = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_store_epi64 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_movdqa64store256_mask ((__v4di *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_movedup_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_movedup_pd(__A), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_movedup_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_movedup_pd(__A), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_movedup_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_movedup_pd(__A), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + 
(__v4df)_mm256_movedup_pd(__A), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_set1_epi32(__m128i __O, __mmask8 __M, int __A) +{ + return (__m128i)__builtin_ia32_selectd_128(__M, + (__v4si) _mm_set1_epi32(__A), + (__v4si)__O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_set1_epi32( __mmask8 __M, int __A) +{ + return (__m128i)__builtin_ia32_selectd_128(__M, + (__v4si) _mm_set1_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_set1_epi32(__m256i __O, __mmask8 __M, int __A) +{ + return (__m256i)__builtin_ia32_selectd_256(__M, + (__v8si) _mm256_set1_epi32(__A), + (__v8si)__O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_set1_epi32( __mmask8 __M, int __A) +{ + return (__m256i)__builtin_ia32_selectd_256(__M, + (__v8si) _mm256_set1_epi32(__A), + (__v8si)_mm256_setzero_si256()); +} + + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A) +{ + return (__m128i) __builtin_ia32_selectq_128(__M, + (__v2di) _mm_set1_epi64x(__A), + (__v2di) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_set1_epi64 (__mmask8 __M, long long __A) +{ + return (__m128i) __builtin_ia32_selectq_128(__M, + (__v2di) _mm_set1_epi64x(__A), + (__v2di) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_set1_epi64 (__m256i __O, __mmask8 __M, long long __A) +{ + return (__m256i) __builtin_ia32_selectq_256(__M, + (__v4di) _mm256_set1_epi64x(__A), + (__v4di) __O) ; +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_set1_epi64 (__mmask8 __M, long long __A) +{ + return (__m256i) __builtin_ia32_selectq_256(__M, + (__v4di) _mm256_set1_epi64x(__A), + (__v4di) _mm256_setzero_si256()); +} + +#define _mm_fixupimm_pd(A, B, C, imm) \ + ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)-1)) + +#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \ + ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), (int)(imm), \ + (__mmask8)(U))) + +#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \ + ((__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2di)(__m128i)(C), \ + (int)(imm), (__mmask8)(U))) + +#define _mm256_fixupimm_pd(A, B, C, imm) \ + ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), \ + (__v4di)(__m256i)(C), (int)(imm), \ + (__mmask8)-1)) + +#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \ + ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), \ + (__v4di)(__m256i)(C), (int)(imm), \ + (__mmask8)(U))) + +#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \ + ((__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), \ + (__v4di)(__m256i)(C), \ + (int)(imm), (__mmask8)(U))) + +#define _mm_fixupimm_ps(A, B, C, imm) \ + ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)-1)) + +#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \ + ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U))) + +#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \ + 
((__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4si)(__m128i)(C), (int)(imm), \ + (__mmask8)(U))) + +#define _mm256_fixupimm_ps(A, B, C, imm) \ + ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), \ + (__v8si)(__m256i)(C), (int)(imm), \ + (__mmask8)-1)) + +#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \ + ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), \ + (__v8si)(__m256i)(C), (int)(imm), \ + (__mmask8)(U))) + +#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \ + ((__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \ + (__v8sf)(__m256)(B), \ + (__v8si)(__m256i)(C), (int)(imm), \ + (__mmask8)(U))) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_load_pd (__mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadapd128_mask ((const __v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_load_pd (__m256d __W, __mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_load_pd (__mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadapd256_mask ((const __v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_load_ps (__m128 __W, __mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_load_ps (__mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadaps128_mask ((const __v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_load_ps (__m256 __W, __mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_load_ps (__mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadaps256_mask ((const __v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline __m128i __DEFAULT_FN_ATTRS128 +_mm_loadu_epi64 (void const *__P) +{ + struct __loadu_epi64 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi64*)__P)->__v; +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_loadu_epi64 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_loadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqudi128_mask ((const __v2di *) __P, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline __m256i __DEFAULT_FN_ATTRS256 +_mm256_loadu_epi64 (void const *__P) +{ + struct __loadu_epi64 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi64*)__P)->__v; +} + +static 
__inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_loadu_epi64 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_loadu_epi64 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqudi256_mask ((const __v4di *) __P, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline __m128i __DEFAULT_FN_ATTRS128 +_mm_loadu_epi32 (void const *__P) +{ + struct __loadu_epi32 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi32*)__P)->__v; +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_loadu_epi32 (__m128i __W, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_loadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_loaddqusi128_mask ((const __v4si *) __P, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline __m256i __DEFAULT_FN_ATTRS256 +_mm256_loadu_epi32 (void const *__P) +{ + struct __loadu_epi32 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_epi32*)__P)->__v; +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_loadu_epi32 (__m256i __W, __mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_loadu_epi32 (__mmask8 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_loaddqusi256_mask ((const __v8si *) __P, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_loadu_pd (__m128d __W, __mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_loadu_pd (__mmask8 __U, void const *__P) +{ + return (__m128d) __builtin_ia32_loadupd128_mask ((const __v2df *) __P, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_loadu_pd (__m256d __W, __mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_loadu_pd (__mmask8 __U, void const *__P) +{ + return (__m256d) __builtin_ia32_loadupd256_mask ((const __v4df *) __P, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_loadu_ps (__m128 __W, __mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_loadu_ps (__mmask8 __U, void const *__P) +{ + return (__m128) __builtin_ia32_loadups128_mask ((const __v4sf *) __P, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_loadu_ps (__m256 __W, __mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 
__DEFAULT_FN_ATTRS256 +_mm256_maskz_loadu_ps (__mmask8 __U, void const *__P) +{ + return (__m256) __builtin_ia32_loadups256_mask ((const __v8sf *) __P, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_store_pd (void *__P, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storeapd128_mask ((__v2df *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_store_pd (void *__P, __mmask8 __U, __m256d __A) +{ + __builtin_ia32_storeapd256_mask ((__v4df *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_store_ps (void *__P, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storeaps128_mask ((__v4sf *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_store_ps (void *__P, __mmask8 __U, __m256 __A) +{ + __builtin_ia32_storeaps256_mask ((__v8sf *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS128 +_mm_storeu_epi64 (void *__P, __m128i __A) +{ + struct __storeu_epi64 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi64*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_storeu_epi64 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedqudi128_mask ((__v2di *) __P, + (__v2di) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS256 +_mm256_storeu_epi64 (void *__P, __m256i __A) +{ + struct __storeu_epi64 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi64*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_storeu_epi64 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_storedqudi256_mask ((__v4di *) __P, + (__v4di) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS128 +_mm_storeu_epi32 (void *__P, __m128i __A) +{ + struct __storeu_epi32 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi32*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_storeu_epi32 (void *__P, __mmask8 __U, __m128i __A) +{ + __builtin_ia32_storedqusi128_mask ((__v4si *) __P, + (__v4si) __A, + (__mmask8) __U); +} + +static __inline void __DEFAULT_FN_ATTRS256 +_mm256_storeu_epi32 (void *__P, __m256i __A) +{ + struct __storeu_epi32 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_epi32*)__P)->__v = __A; +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_storeu_epi32 (void *__P, __mmask8 __U, __m256i __A) +{ + __builtin_ia32_storedqusi256_mask ((__v8si *) __P, + (__v8si) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_storeu_pd (void *__P, __mmask8 __U, __m128d __A) +{ + __builtin_ia32_storeupd128_mask ((__v2df *) __P, + (__v2df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_storeu_pd (void *__P, __mmask8 __U, __m256d __A) +{ + __builtin_ia32_storeupd256_mask ((__v4df *) __P, + (__v4df) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_storeu_ps (void *__P, __mmask8 __U, __m128 __A) +{ + __builtin_ia32_storeups128_mask ((__v4sf *) __P, + (__v4sf) __A, + (__mmask8) __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_storeu_ps (void *__P, __mmask8 __U, __m256 __A) +{ + __builtin_ia32_storeups256_mask ((__v8sf *) __P, + (__v8sf) __A, + (__mmask8) __U); +} + 
+ +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_unpackhi_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_unpackhi_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_unpackhi_pd(__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_unpackhi_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_unpackhi_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_unpackhi_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpackhi_pd(__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_unpackhi_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_unpackhi_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_unpackhi_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_unpackhi_ps(__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_unpackhi_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_unpackhi_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_unpackhi_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpackhi_ps(__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_unpackhi_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_unpacklo_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_unpacklo_pd(__A, __B), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_unpacklo_pd(__mmask8 __U, __m128d __A, __m128d __B) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_unpacklo_pd(__A, __B), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_unpacklo_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_unpacklo_pd(__A, __B), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpacklo_pd(__mmask8 __U, __m256d __A, __m256d __B) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_unpacklo_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_unpacklo_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_unpacklo_ps(__A, __B), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_unpacklo_ps(__mmask8 __U, __m128 __A, __m128 __B) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_unpacklo_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_unpacklo_ps(__m256 
__W, __mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_unpacklo_ps(__A, __B), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpacklo_ps(__mmask8 __U, __m256 __A, __m256 __B) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_unpacklo_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_rcp14_pd (__m128d __A) +{ + return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_rcp14_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_rcp14_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rcp14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_rcp14_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_rcp14_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_rcp14_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rcp14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_rcp14_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_rcp14_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_rcp14_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rcp14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_rcp14_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_rcp14_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rcp14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +#define _mm_mask_permute_pd(W, U, X, C) \ + ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm_permute_pd((X), (C)), \ + (__v2df)(__m128d)(W))) + +#define _mm_maskz_permute_pd(U, X, C) \ + ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm_permute_pd((X), (C)), \ + (__v2df)_mm_setzero_pd())) + +#define _mm256_mask_permute_pd(W, U, X, C) \ + ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permute_pd((X), (C)), \ + (__v4df)(__m256d)(W))) + +#define _mm256_maskz_permute_pd(U, X, C) \ + 
((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permute_pd((X), (C)), \ + (__v4df)_mm256_setzero_pd())) + +#define _mm_mask_permute_ps(W, U, X, C) \ + ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm_permute_ps((X), (C)), \ + (__v4sf)(__m128)(W))) + +#define _mm_maskz_permute_ps(U, X, C) \ + ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm_permute_ps((X), (C)), \ + (__v4sf)_mm_setzero_ps())) + +#define _mm256_mask_permute_ps(W, U, X, C) \ + ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_permute_ps((X), (C)), \ + (__v8sf)(__m256)(W))) + +#define _mm256_maskz_permute_ps(U, X, C) \ + ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_permute_ps((X), (C)), \ + (__v8sf)_mm256_setzero_ps())) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_permutevar_pd(__A, __C), + (__v2df)__W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C) +{ + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_permutevar_pd(__A, __C), + (__v2df)_mm_setzero_pd()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_permutevar_pd(__A, __C), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_permutevar_pd(__A, __C), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_permutevar_ps(__A, __C), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_permutevar_ps(__A, __C), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_permutevar_ps(__A, __C), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_permutevar_ps(__A, __C), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_test_epi32_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpneq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_mask_test_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpneq_epi32_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_test_epi32_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpneq_epi32_mask (_mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_mask_test_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + 
return _mm256_mask_cmpneq_epi32_mask (__U, _mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_test_epi64_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpneq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_mask_test_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpneq_epi64_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_test_epi64_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpneq_epi64_mask (_mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_mask_test_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpneq_epi64_mask (__U, _mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_testn_epi32_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpeq_epi32_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_mask_testn_epi32_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpeq_epi32_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_testn_epi32_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpeq_epi32_mask (_mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_mask_testn_epi32_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpeq_epi32_mask (__U, _mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_testn_epi64_mask (__m128i __A, __m128i __B) +{ + return _mm_cmpeq_epi64_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS128 +_mm_mask_testn_epi64_mask (__mmask8 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_cmpeq_epi64_mask (__U, _mm_and_si128 (__A, __B), + _mm_setzero_si128()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_testn_epi64_mask (__m256i __A, __m256i __B) +{ + return _mm256_cmpeq_epi64_mask (_mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __mmask8 __DEFAULT_FN_ATTRS256 +_mm256_mask_testn_epi64_mask (__mmask8 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_cmpeq_epi64_mask (__U, _mm256_and_si256 (__A, __B), + _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpackhi_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_unpackhi_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpackhi_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_unpackhi_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpackhi_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_unpackhi_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpackhi_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + 
(__v8si)_mm256_unpackhi_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpackhi_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_unpackhi_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpackhi_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_unpackhi_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpackhi_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_unpackhi_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpackhi_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_unpackhi_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpacklo_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_unpacklo_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpacklo_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_unpacklo_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpacklo_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_unpacklo_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpacklo_epi32(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_unpacklo_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_unpacklo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_unpacklo_epi64(__A, __B), + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_unpacklo_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, + (__v2di)_mm_unpacklo_epi64(__A, __B), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_unpacklo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_unpacklo_epi64(__A, __B), + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, + (__v4di)_mm256_unpacklo_epi64(__A, __B), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sra_epi32(__A, __B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B) +{ + return 
(__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_sra_epi32(__A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sra_epi32(__A, __B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_sra_epi32(__A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srai_epi32(__A, (int)__B), + (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U, + (__v4si)_mm_srai_epi32(__A, (int)__B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srai_epi32(__A, (int)__B), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U, + (__v8si)_mm256_srai_epi32(__A, (int)__B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_sra_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ + (__v2di)_mm_sra_epi64(__A, __B), \ + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ + (__v2di)_mm_sra_epi64(__A, __B), \ + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sra_epi64(__m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_psraq256((__v4di) __A, (__v2di) __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm256_sra_epi64(__A, __B), \ + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm256_sra_epi64(__A, __B), \ + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_srai_epi64(__m128i __A, unsigned int __imm) +{ + return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, (int)__imm); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __imm) +{ + return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ + (__v2di)_mm_srai_epi64(__A, __imm), \ + (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm) +{ + 
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U, \ + (__v2di)_mm_srai_epi64(__A, __imm), \ + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_srai_epi64(__m256i __A, unsigned int __imm) +{ + return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, (int)__imm); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, + unsigned int __imm) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm256_srai_epi64(__A, __imm), \ + (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U, \ + (__v4di)_mm256_srai_epi64(__A, __imm), \ + (__v4di)_mm256_setzero_si256()); +} + +#define _mm_ternarylogic_epi32(A, B, C, imm) \ + ((__m128i)__builtin_ia32_pternlogd128_mask( \ + (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \ + (unsigned char)(imm), (__mmask8)-1)) + +#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \ + ((__m128i)__builtin_ia32_pternlogd128_mask( \ + (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \ + ((__m128i)__builtin_ia32_pternlogd128_maskz( \ + (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#define _mm256_ternarylogic_epi32(A, B, C, imm) \ + ((__m256i)__builtin_ia32_pternlogd256_mask( \ + (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \ + (unsigned char)(imm), (__mmask8)-1)) + +#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \ + ((__m256i)__builtin_ia32_pternlogd256_mask( \ + (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \ + ((__m256i)__builtin_ia32_pternlogd256_maskz( \ + (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#define _mm_ternarylogic_epi64(A, B, C, imm) \ + ((__m128i)__builtin_ia32_pternlogq128_mask( \ + (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \ + (unsigned char)(imm), (__mmask8)-1)) + +#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \ + ((__m128i)__builtin_ia32_pternlogq128_mask( \ + (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \ + ((__m128i)__builtin_ia32_pternlogq128_maskz( \ + (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#define _mm256_ternarylogic_epi64(A, B, C, imm) \ + ((__m256i)__builtin_ia32_pternlogq256_mask( \ + (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \ + (unsigned char)(imm), (__mmask8)-1)) + +#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \ + ((__m256i)__builtin_ia32_pternlogq256_mask( \ + (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \ + ((__m256i)__builtin_ia32_pternlogq256_maskz( \ + (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \ + (unsigned char)(imm), (__mmask8)(U))) + +#define _mm256_shuffle_f32x4(A, B, imm) \ + ((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \ + 
(__v8sf)(__m256)(B), (int)(imm))) + +#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \ + ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \ + (__v8sf)(__m256)(W))) + +#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \ + ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \ + (__v8sf)_mm256_setzero_ps())) + +#define _mm256_shuffle_f64x2(A, B, imm) \ + ((__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \ + (__v4df)(__m256d)(B), (int)(imm))) + +#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \ + ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \ + (__v4df)(__m256d)(W))) + +#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \ + ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \ + (__v4df)_mm256_setzero_pd())) + +#define _mm256_shuffle_i32x4(A, B, imm) \ + ((__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), (int)(imm))) + +#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \ + (__v8si)(__m256i)(W))) + +#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \ + (__v8si)_mm256_setzero_si256())) + +#define _mm256_shuffle_i64x2(A, B, imm) \ + ((__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), (int)(imm))) + +#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \ + (__v4di)(__m256i)(W))) + + +#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \ + (__v4di)_mm256_setzero_si256())) + +#define _mm_mask_shuffle_pd(W, U, A, B, M) \ + ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm_shuffle_pd((A), (B), (M)), \ + (__v2df)(__m128d)(W))) + +#define _mm_maskz_shuffle_pd(U, A, B, M) \ + ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \ + (__v2df)_mm_shuffle_pd((A), (B), (M)), \ + (__v2df)_mm_setzero_pd())) + +#define _mm256_mask_shuffle_pd(W, U, A, B, M) \ + ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_shuffle_pd((A), (B), (M)), \ + (__v4df)(__m256d)(W))) + +#define _mm256_maskz_shuffle_pd(U, A, B, M) \ + ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_shuffle_pd((A), (B), (M)), \ + (__v4df)_mm256_setzero_pd())) + +#define _mm_mask_shuffle_ps(W, U, A, B, M) \ + ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm_shuffle_ps((A), (B), (M)), \ + (__v4sf)(__m128)(W))) + +#define _mm_maskz_shuffle_ps(U, A, B, M) \ + ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \ + (__v4sf)_mm_shuffle_ps((A), (B), (M)), \ + (__v4sf)_mm_setzero_ps())) + +#define _mm256_mask_shuffle_ps(W, U, A, B, M) \ + ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \ + (__v8sf)(__m256)(W))) + +#define _mm256_maskz_shuffle_ps(U, A, B, M) \ + ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \ + (__v8sf)_mm256_setzero_ps())) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_rsqrt14_pd (__m128d __A) +{ + return (__m128d) 
__builtin_ia32_rsqrt14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_rsqrt14_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_rsqrt14_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_rsqrt14pd128_mask ((__v2df) __A, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_rsqrt14_pd (__m256d __A) +{ + return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_rsqrt14_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_rsqrt14_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_rsqrt14pd256_mask ((__v4df) __A, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_rsqrt14_ps (__m128 __A) +{ + return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_rsqrt14_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_rsqrt14_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_rsqrt14ps128_mask ((__v4sf) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_rsqrt14_ps (__m256 __A) +{ + return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) -1); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_rsqrt14_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_rsqrt14_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_rsqrt14ps256_mask ((__v8sf) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_broadcast_f32x4(__m128 __A) +{ + return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_f32x4(__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__M, + (__v8sf)_mm256_broadcast_f32x4(__A), + (__v8sf)__O); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcast_f32x4 (__mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__M, + (__v8sf)_mm256_broadcast_f32x4(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_broadcast_i32x4(__m128i __A) +{ + return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcast_i32x4(__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + 
(__v8si)_mm256_broadcast_i32x4(__A), + (__v8si)__O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcast_i32x4(__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_broadcast_i32x4(__A), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastsd_pd (__m256d __O, __mmask8 __M, __m128d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256(__M, + (__v4df) _mm256_broadcastsd_pd(__A), + (__v4df) __O); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A) +{ + return (__m256d)__builtin_ia32_selectpd_256(__M, + (__v4df) _mm256_broadcastsd_pd(__A), + (__v4df) _mm256_setzero_pd()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastss_ps (__m128 __O, __mmask8 __M, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128(__M, + (__v4sf) _mm_broadcastss_ps(__A), + (__v4sf) __O); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastss_ps (__mmask8 __M, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128(__M, + (__v4sf) _mm_broadcastss_ps(__A), + (__v4sf) _mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastss_ps (__m256 __O, __mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256(__M, + (__v8sf) _mm256_broadcastss_ps(__A), + (__v8sf) __O); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastss_ps (__mmask8 __M, __m128 __A) +{ + return (__m256)__builtin_ia32_selectps_256(__M, + (__v8sf) _mm256_broadcastss_ps(__A), + (__v8sf) _mm256_setzero_ps()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastd_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128(__M, + (__v4si) _mm_broadcastd_epi32(__A), + (__v4si) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectd_128(__M, + (__v4si) _mm_broadcastd_epi32(__A), + (__v4si) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastd_epi32 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256(__M, + (__v8si) _mm256_broadcastd_epi32(__A), + (__v8si) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastd_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectd_256(__M, + (__v8si) _mm256_broadcastd_epi32(__A), + (__v8si) _mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_broadcastq_epi64 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di) _mm_broadcastq_epi64(__A), + (__v2di) __O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m128i)__builtin_ia32_selectq_128(__M, + (__v2di) _mm_broadcastq_epi64(__A), + (__v2di) _mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_broadcastq_epi64 (__m256i __O, __mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di) _mm256_broadcastq_epi64(__A), + (__v4di) __O); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A) +{ + return (__m256i)__builtin_ia32_selectq_256(__M, + (__v4di) _mm256_broadcastq_epi64(__A), + (__v4di) 
_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb128_mask ((__v4si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdb256_mask ((__v8si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi)_mm_setzero_si128 (), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi)__O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi32_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsdw256_mask ((__v8si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + 
(__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb128_mask ((__v2di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi64_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqb256_mask ((__v4di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd128_mask ((__v2di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si)__O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqd256_mask ((__v4di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtsepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + 
return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtsepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw128_mask ((__v2di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovsqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtsepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtsepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovsqw256_mask ((__v4di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovsqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi32_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb128_mask ((__v4si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi32_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdb256_mask ((__v8si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusdb256mem_mask ((__v16qi*) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi32_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 
+_mm_maskz_cvtusepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi32_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) _mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusdw256_mask ((__v8si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi64_epi8 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb128_mask ((__v2di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi64_epi8 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi) __O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqb256_mask ((__v4di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi64_epi32 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd128_mask ((__v2di) __A, + (__v4si) 
_mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi64_epi32 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqd256_mask ((__v4di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtusepi64_epi16 (__m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtusepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw128_mask ((__v2di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovusqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtusepi64_epi16 (__m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi)_mm_undefined_si128(), + (__mmask8) -1); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtusepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovusqw256_mask ((__v4di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtusepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovusqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi32_epi8 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v4si)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1, + 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi32_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdb128_mask ((__v4si) __A, + (__v16qi) + _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_storeu_epi8 
(void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovdb128mem_mask ((__v16qi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi32_epi8 (__m256i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v8si)__A, __v8qi), + (__v8qi){0, 0, 0, 0, 0, 0, 0, 0}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi32_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdb256_mask ((__v8si) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovdb256mem_mask ((__v16qi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi32_epi16 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v4si)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1, + 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi32_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovdw128_mask ((__v4si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovdw128mem_mask ((__v8hi *) __P, (__v4si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi32_epi16 (__m256i __A) +{ + return (__m128i)__builtin_convertvector((__v8si)__A, __v8hi); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi32_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovdw256_mask ((__v8si) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi32_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovdw256mem_mask ((__v8hi *) __P, (__v8si) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_epi8 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v2di)__A, __v2qi), (__v2qi){0, 0}, 0, 1, 2, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_epi8 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqb128_mask ((__v2di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqb128mem_mask ((__v16qi *) __P, (__v2di) __A, __M); +} + +static 
__inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_epi8 (__m256i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v4di)__A, __v4qi), (__v4qi){0, 0, 0, 0}, 0, 1, + 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + (__v16qi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_epi8 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqb256_mask ((__v4di) __A, + (__v16qi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqb256mem_mask ((__v16qi *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_epi32 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v2di)__A, __v2si), (__v2si){0, 0}, 0, 1, 2, 3); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_epi32 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqd128_mask ((__v2di) __A, + (__v4si) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqd128mem_mask ((__v4si *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_epi32 (__m256i __A) +{ + return (__m128i)__builtin_convertvector((__v4di)__A, __v4si); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_epi32 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm256_cvtepi64_epi32(__A), + (__v4si)__O); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_epi32 (__mmask8 __M, __m256i __A) +{ + return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M, + (__v4si)_mm256_cvtepi64_epi32(__A), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_storeu_epi32 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqd256mem_mask ((__v4si *) __P, (__v4di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_cvtepi64_epi16 (__m128i __A) +{ + return (__m128i)__builtin_shufflevector( + __builtin_convertvector((__v2di)__A, __v2hi), (__v2hi){0, 0}, 0, 1, 2, 3, + 3, 3, 3, 3); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi)__O, + __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtepi64_epi16 (__mmask8 __M, __m128i __A) +{ + return (__m128i) __builtin_ia32_pmovqw128_mask ((__v2di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m128i __A) +{ + __builtin_ia32_pmovqw128mem_mask ((__v8hi *) __P, (__v2di) __A, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvtepi64_epi16 (__m256i __A) +{ + return (__m128i)__builtin_shufflevector( + 
__builtin_convertvector((__v4di)__A, __v4hi), (__v4hi){0, 0, 0, 0}, 0, 1, + 2, 3, 4, 5, 6, 7); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi) __O, __M); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtepi64_epi16 (__mmask8 __M, __m256i __A) +{ + return (__m128i) __builtin_ia32_pmovqw256_mask ((__v4di) __A, + (__v8hi) _mm_setzero_si128 (), + __M); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A) +{ + __builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M); +} + +#define _mm256_extractf32x4_ps(A, imm) \ + ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \ + (int)(imm), \ + (__v4sf)_mm_undefined_ps(), \ + (__mmask8)-1)) + +#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \ + ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \ + (int)(imm), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_extractf32x4_ps(U, A, imm) \ + ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \ + (int)(imm), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U))) + +#define _mm256_extracti32x4_epi32(A, imm) \ + ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \ + (int)(imm), \ + (__v4si)_mm_undefined_si128(), \ + (__mmask8)-1)) + +#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \ + ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \ + (int)(imm), \ + (__v4si)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \ + ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \ + (int)(imm), \ + (__v4si)_mm_setzero_si128(), \ + (__mmask8)(U))) + +#define _mm256_insertf32x4(A, B, imm) \ + ((__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \ + (__v4sf)(__m128)(B), (int)(imm))) + +#define _mm256_mask_insertf32x4(W, U, A, B, imm) \ + ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \ + (__v8sf)(__m256)(W))) + +#define _mm256_maskz_insertf32x4(U, A, B, imm) \ + ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \ + (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \ + (__v8sf)_mm256_setzero_ps())) + +#define _mm256_inserti32x4(A, B, imm) \ + ((__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \ + (__v4si)(__m128i)(B), (int)(imm))) + +#define _mm256_mask_inserti32x4(W, U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_inserti32x4((A), (B), (imm)), \ + (__v8si)(__m256i)(W))) + +#define _mm256_maskz_inserti32x4(U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_inserti32x4((A), (B), (imm)), \ + (__v8si)_mm256_setzero_si256())) + +#define _mm_getmant_pd(A, B, C) \ + ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1)) + +#define _mm_mask_getmant_pd(W, U, A, B, C) \ + ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v2df)(__m128d)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_getmant_pd(U, A, B, C) \ + ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm256_getmant_pd(A, B, C) \ + 
((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)-1)) + +#define _mm256_mask_getmant_pd(W, U, A, B, C) \ + ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4df)(__m256d)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_getmant_pd(U, A, B, C) \ + ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8)(U))) + +#define _mm_getmant_ps(A, B, C) \ + ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1)) + +#define _mm_mask_getmant_ps(W, U, A, B, C) \ + ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)(__m128)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_getmant_ps(U, A, B, C) \ + ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \ + (int)(((C)<<2) | (B)), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(U))) + +#define _mm256_getmant_ps(A, B, C) \ + ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1)) + +#define _mm256_mask_getmant_ps(W, U, A, B, C) \ + ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)(__m256)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_getmant_ps(U, A, B, C) \ + ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \ + (int)(((C)<<2) | (B)), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)(U))) + +#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \ + ((__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \ + (void const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \ + ((__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \ + ((__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \ + (void const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \ + ((__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \ + (void const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \ + ((__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \ + (void const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \ + ((__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v2di)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \ + ((__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \ + (void const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \ + ((__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v4di)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define 
_mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \ + ((__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \ + ((__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \ + ((__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \ + ((__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \ + ((__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \ + ((__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \ + (void const *)(addr), \ + (__v4si)(__m128i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \ + ((__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \ + ((__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \ + (void const *)(addr), \ + (__v8si)(__m256i)(index), \ + (__mmask8)(mask), (int)(scale))) + +#define _mm256_permutex_pd(X, C) \ + ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C))) + +#define _mm256_mask_permutex_pd(W, U, X, C) \ + ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permutex_pd((X), (C)), \ + (__v4df)(__m256d)(W))) + +#define _mm256_maskz_permutex_pd(U, X, C) \ + ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \ + (__v4df)_mm256_permutex_pd((X), (C)), \ + (__v4df)_mm256_setzero_pd())) + +#define _mm256_permutex_epi64(X, C) \ + ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))) + +#define _mm256_mask_permutex_epi64(W, U, X, C) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_permutex_epi64((X), (C)), \ + (__v4di)(__m256i)(W))) + +#define _mm256_maskz_permutex_epi64(U, X, C) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_permutex_epi64((X), (C)), \ + (__v4di)_mm256_setzero_si256())) + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_permutexvar_pd (__m256i __X, __m256d __Y) +{ + return (__m256d)__builtin_ia32_permvardf256((__v4df)__Y, (__v4di)__X); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_pd (__m256d __W, __mmask8 __U, __m256i __X, + __m256d __Y) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_permutexvar_pd(__X, __Y), + (__v4df)__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_pd (__mmask8 __U, __m256i __X, __m256d __Y) +{ + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_permutexvar_pd(__X, __Y), + (__v4df)_mm256_setzero_pd()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 
+_mm256_permutexvar_epi64 ( __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_permvardi256((__v4di) __Y, (__v4di) __X); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_epi64 (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_permutexvar_epi64(__X, __Y), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_epi64 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M, + (__v4di)_mm256_permutexvar_epi64(__X, __Y), + (__v4di)__W); +} + +#define _mm256_permutexvar_ps(A, B) _mm256_permutevar8x32_ps((B), (A)) + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_ps(__m256 __W, __mmask8 __U, __m256i __X, __m256 __Y) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_permutexvar_ps(__X, __Y), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_ps(__mmask8 __U, __m256i __X, __m256 __Y) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_permutexvar_ps(__X, __Y), + (__v8sf)_mm256_setzero_ps()); +} + +#define _mm256_permutexvar_epi32(A, B) _mm256_permutevar8x32_epi32((B), (A)) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_permutexvar_epi32(__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_permutexvar_epi32(__X, __Y), + (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M, + (__v8si)_mm256_permutexvar_epi32(__X, __Y), + (__v8si)_mm256_setzero_si256()); +} + +#define _mm_alignr_epi32(A, B, imm) \ + ((__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (int)(imm))) + +#define _mm_mask_alignr_epi32(W, U, A, B, imm) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_alignr_epi32((A), (B), (imm)), \ + (__v4si)(__m128i)(W))) + +#define _mm_maskz_alignr_epi32(U, A, B, imm) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_alignr_epi32((A), (B), (imm)), \ + (__v4si)_mm_setzero_si128())) + +#define _mm256_alignr_epi32(A, B, imm) \ + ((__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), (int)(imm))) + +#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \ + (__v8si)(__m256i)(W))) + +#define _mm256_maskz_alignr_epi32(U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \ + (__v8si)_mm256_setzero_si256())) + +#define _mm_alignr_epi64(A, B, imm) \ + ((__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (int)(imm))) + +#define _mm_mask_alignr_epi64(W, U, A, B, imm) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_alignr_epi64((A), (B), (imm)), \ + (__v2di)(__m128i)(W))) + +#define _mm_maskz_alignr_epi64(U, A, B, imm) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_alignr_epi64((A), (B), (imm)), \ + (__v2di)_mm_setzero_si128())) + +#define _mm256_alignr_epi64(A, B, imm) \ + ((__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), (int)(imm))) + 
+#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \ + (__v4di)(__m256i)(W))) + +#define _mm256_maskz_alignr_epi64(U, A, B, imm) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \ + (__v4di)_mm256_setzero_si256())) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_movehdup_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_movehdup_ps (__mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_movehdup_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_movehdup_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_movehdup_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_movehdup_ps (__mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_movehdup_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_moveldup_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_moveldup_ps(__A), + (__v4sf)__W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_moveldup_ps (__mmask8 __U, __m128 __A) +{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_moveldup_ps(__A), + (__v4sf)_mm_setzero_ps()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_moveldup_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_moveldup_ps(__A), + (__v8sf)__W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A) +{ + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_moveldup_ps(__A), + (__v8sf)_mm256_setzero_ps()); +} + +#define _mm256_mask_shuffle_epi32(W, U, A, I) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shuffle_epi32((A), (I)), \ + (__v8si)(__m256i)(W))) + +#define _mm256_maskz_shuffle_epi32(U, A, I) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shuffle_epi32((A), (I)), \ + (__v8si)_mm256_setzero_si256())) + +#define _mm_mask_shuffle_epi32(W, U, A, I) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shuffle_epi32((A), (I)), \ + (__v4si)(__m128i)(W))) + +#define _mm_maskz_shuffle_epi32(U, A, I) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shuffle_epi32((A), (I)), \ + (__v4si)_mm_setzero_si128())) + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, + (__v2df) __A, + (__v2df) __W); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_pd (__mmask8 __U, __m128d __A) +{ + return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U, + (__v2df) __A, + (__v2df) _mm_setzero_pd ()); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, + (__v4df) __A, + (__v4df) 
__W); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A) +{ + return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U, + (__v4df) __A, + (__v4df) _mm256_setzero_pd ()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U, + (__v4sf) __A, + (__v4sf) __W); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_mov_ps (__mmask8 __U, __m128 __A) +{ + return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U, + (__v4sf) __A, + (__v4sf) _mm_setzero_ps ()); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, + (__v8sf) __A, + (__v8sf) __W); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A) +{ + return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U, + (__v8sf) __A, + (__v8sf) _mm256_setzero_ps ()); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_mask_cvtph_ps (__m128 __W, __mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maskz_cvtph_ps (__mmask8 __U, __m128i __A) +{ + return (__m128) __builtin_ia32_vcvtph2ps_mask ((__v8hi) __A, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_mask_cvtph_ps (__m256 __W, __mmask8 __U, __m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A) +{ + return (__m256) __builtin_ia32_vcvtph2ps256_mask ((__v8hi) __A, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +#define _mm_mask_cvt_roundps_ph(W, U, A, I) \ + ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm_maskz_cvt_roundps_ph(U, A, I) \ + ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \ + (__v8hi)_mm_setzero_si128(), \ + (__mmask8)(U))) + +#define _mm_mask_cvtps_ph _mm_mask_cvt_roundps_ph +#define _mm_maskz_cvtps_ph _mm_maskz_cvt_roundps_ph + +#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \ + ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \ + (__v8hi)(__m128i)(W), \ + (__mmask8)(U))) + +#define _mm256_maskz_cvt_roundps_ph(U, A, I) \ + ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \ + (__v8hi)_mm_setzero_si128(), \ + (__mmask8)(U))) + +#define _mm256_mask_cvtps_ph _mm256_mask_cvt_roundps_ph +#define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph + + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __AVX512VLINTRIN_H */ diff --git a/third_party/intel/clang/avx512vlvbmi2intrin.h b/third_party/intel/clang/avx512vlvbmi2intrin.h new file mode 100644 index 000000000..77af2d5cb --- /dev/null +++ b/third_party/intel/clang/avx512vlvbmi2intrin.h @@ -0,0 +1,695 @@ +/*===------------- avx512vlvbmi2intrin.h - VBMI2 intrinsics -----------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VLVBMI2INTRIN_H +#define __AVX512VLVBMI2INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512vbmi2,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512vbmi2,no-evex512"), \ + __min_vector_width__(256))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D, + (__v8hi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_epi16(__mmask8 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D, + (__v8hi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D, + (__v16qi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_compress_epi8(__mmask16 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D, + (__v16qi) _mm_setzero_si128(), + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D) +{ + __builtin_ia32_compressstorehi128_mask ((__v8hi *) __P, (__v8hi) __D, + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D) +{ + __builtin_ia32_compressstoreqi128_mask ((__v16qi *) __P, (__v16qi) __D, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D, + (__v8hi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_epi16(__mmask8 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D, + (__v8hi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D, + (__v16qi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expand_epi8(__mmask16 __U, __m128i __D) +{ + return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D, + (__v16qi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P, + (__v8hi) __S, + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_epi16(__mmask8 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P, + (__v8hi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P, + (__v16qi) __S, + __U); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS128 +_mm_maskz_expandloadu_epi8(__mmask16 __U, void const *__P) +{ + return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P, + (__v16qi) _mm_setzero_si128(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D, + (__v16hi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_epi16(__mmask16 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D, + (__v16hi) _mm256_setzero_si256(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_compress_epi8(__m256i __S, __mmask32 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D, + (__v32qi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_compress_epi8(__mmask32 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D, + (__v32qi) _mm256_setzero_si256(), + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_epi16(void *__P, __mmask16 __U, __m256i __D) +{ + __builtin_ia32_compressstorehi256_mask ((__v16hi *) __P, (__v16hi) __D, + __U); +} + +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_mask_compressstoreu_epi8(void *__P, __mmask32 __U, __m256i __D) +{ + __builtin_ia32_compressstoreqi256_mask ((__v32qi *) __P, (__v32qi) __D, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_epi16(__m256i __S, __mmask16 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D, + (__v16hi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_epi16(__mmask16 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D, + (__v16hi) _mm256_setzero_si256(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expand_epi8(__m256i __S, __mmask32 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D, + (__v32qi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expand_epi8(__mmask32 __U, __m256i __D) +{ + return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D, + (__v32qi) _mm256_setzero_si256(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_epi16(__m256i __S, __mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *)__P, + (__v16hi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *)__P, + (__v16hi) _mm256_setzero_si256(), + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_expandloadu_epi8(__m256i __S, __mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *)__P, + (__v32qi) __S, + __U); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P) +{ + return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *)__P, + (__v32qi) _mm256_setzero_si256(), + __U); +} + +#define _mm256_shldi_epi64(A, B, I) \ + ((__m256i)__builtin_ia32_vpshldq256((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), (int)(I))) + +#define _mm256_mask_shldi_epi64(S, U, A, B, I) \ + 
((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shldi_epi64((A), (B), (I)), \ + (__v4di)(__m256i)(S))) + +#define _mm256_maskz_shldi_epi64(U, A, B, I) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shldi_epi64((A), (B), (I)), \ + (__v4di)_mm256_setzero_si256())) + +#define _mm_shldi_epi64(A, B, I) \ + ((__m128i)__builtin_ia32_vpshldq128((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (int)(I))) + +#define _mm_mask_shldi_epi64(S, U, A, B, I) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_shldi_epi64((A), (B), (I)), \ + (__v2di)(__m128i)(S))) + +#define _mm_maskz_shldi_epi64(U, A, B, I) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_shldi_epi64((A), (B), (I)), \ + (__v2di)_mm_setzero_si128())) + +#define _mm256_shldi_epi32(A, B, I) \ + ((__m256i)__builtin_ia32_vpshldd256((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), (int)(I))) + +#define _mm256_mask_shldi_epi32(S, U, A, B, I) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shldi_epi32((A), (B), (I)), \ + (__v8si)(__m256i)(S))) + +#define _mm256_maskz_shldi_epi32(U, A, B, I) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shldi_epi32((A), (B), (I)), \ + (__v8si)_mm256_setzero_si256())) + +#define _mm_shldi_epi32(A, B, I) \ + ((__m128i)__builtin_ia32_vpshldd128((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (int)(I))) + +#define _mm_mask_shldi_epi32(S, U, A, B, I) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shldi_epi32((A), (B), (I)), \ + (__v4si)(__m128i)(S))) + +#define _mm_maskz_shldi_epi32(U, A, B, I) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shldi_epi32((A), (B), (I)), \ + (__v4si)_mm_setzero_si128())) + +#define _mm256_shldi_epi16(A, B, I) \ + ((__m256i)__builtin_ia32_vpshldw256((__v16hi)(__m256i)(A), \ + (__v16hi)(__m256i)(B), (int)(I))) + +#define _mm256_mask_shldi_epi16(S, U, A, B, I) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \ + (__v16hi)(__m256i)(S))) + +#define _mm256_maskz_shldi_epi16(U, A, B, I) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \ + (__v16hi)_mm256_setzero_si256())) + +#define _mm_shldi_epi16(A, B, I) \ + ((__m128i)__builtin_ia32_vpshldw128((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (int)(I))) + +#define _mm_mask_shldi_epi16(S, U, A, B, I) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shldi_epi16((A), (B), (I)), \ + (__v8hi)(__m128i)(S))) + +#define _mm_maskz_shldi_epi16(U, A, B, I) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shldi_epi16((A), (B), (I)), \ + (__v8hi)_mm_setzero_si128())) + +#define _mm256_shrdi_epi64(A, B, I) \ + ((__m256i)__builtin_ia32_vpshrdq256((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), (int)(I))) + +#define _mm256_mask_shrdi_epi64(S, U, A, B, I) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \ + (__v4di)(__m256i)(S))) + +#define _mm256_maskz_shrdi_epi64(U, A, B, I) \ + ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \ + (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \ + (__v4di)_mm256_setzero_si256())) + +#define _mm_shrdi_epi64(A, B, I) \ + ((__m128i)__builtin_ia32_vpshrdq128((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (int)(I))) + +#define _mm_mask_shrdi_epi64(S, U, A, B, I) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + 
(__v2di)_mm_shrdi_epi64((A), (B), (I)), \ + (__v2di)(__m128i)(S))) + +#define _mm_maskz_shrdi_epi64(U, A, B, I) \ + ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \ + (__v2di)_mm_shrdi_epi64((A), (B), (I)), \ + (__v2di)_mm_setzero_si128())) + +#define _mm256_shrdi_epi32(A, B, I) \ + ((__m256i)__builtin_ia32_vpshrdd256((__v8si)(__m256i)(A), \ + (__v8si)(__m256i)(B), (int)(I))) + +#define _mm256_mask_shrdi_epi32(S, U, A, B, I) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \ + (__v8si)(__m256i)(S))) + +#define _mm256_maskz_shrdi_epi32(U, A, B, I) \ + ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \ + (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \ + (__v8si)_mm256_setzero_si256())) + +#define _mm_shrdi_epi32(A, B, I) \ + ((__m128i)__builtin_ia32_vpshrdd128((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (int)(I))) + +#define _mm_mask_shrdi_epi32(S, U, A, B, I) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shrdi_epi32((A), (B), (I)), \ + (__v4si)(__m128i)(S))) + +#define _mm_maskz_shrdi_epi32(U, A, B, I) \ + ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \ + (__v4si)_mm_shrdi_epi32((A), (B), (I)), \ + (__v4si)_mm_setzero_si128())) + +#define _mm256_shrdi_epi16(A, B, I) \ + ((__m256i)__builtin_ia32_vpshrdw256((__v16hi)(__m256i)(A), \ + (__v16hi)(__m256i)(B), (int)(I))) + +#define _mm256_mask_shrdi_epi16(S, U, A, B, I) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \ + (__v16hi)(__m256i)(S))) + +#define _mm256_maskz_shrdi_epi16(U, A, B, I) \ + ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \ + (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \ + (__v16hi)_mm256_setzero_si256())) + +#define _mm_shrdi_epi16(A, B, I) \ + ((__m128i)__builtin_ia32_vpshrdw128((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (int)(I))) + +#define _mm_mask_shrdi_epi16(S, U, A, B, I) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \ + (__v8hi)(__m128i)(S))) + +#define _mm_maskz_shrdi_epi16(U, A, B, I) \ + ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \ + (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \ + (__v8hi)_mm_setzero_si128())) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshldvq256((__v4di)__A, (__v4di)__B, + (__v4di)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shldv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_shldv_epi64(__A, __B, __C), + (__v4di)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shldv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_shldv_epi64(__A, __B, __C), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shldv_epi64(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshldvq128((__v2di)__A, (__v2di)__B, + (__v2di)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shldv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_shldv_epi64(__A, __B, __C), + (__v2di)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shldv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return 
(__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_shldv_epi64(__A, __B, __C), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shldv_epi32(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshldvd256((__v8si)__A, (__v8si)__B, + (__v8si)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shldv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_shldv_epi32(__A, __B, __C), + (__v8si)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shldv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_shldv_epi32(__A, __B, __C), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shldv_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshldvd128((__v4si)__A, (__v4si)__B, + (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shldv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_shldv_epi32(__A, __B, __C), + (__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shldv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_shldv_epi32(__A, __B, __C), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shldv_epi16(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshldvw256((__v16hi)__A, (__v16hi)__B, + (__v16hi)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shldv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_shldv_epi16(__A, __B, __C), + (__v16hi)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shldv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_shldv_epi16(__A, __B, __C), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shldv_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshldvw128((__v8hi)__A, (__v8hi)__B, + (__v8hi)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shldv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_shldv_epi16(__A, __B, __C), + (__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shldv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_shldv_epi16(__A, __B, __C), + (__v8hi)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shrdv_epi64(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshrdvq256((__v4di)__A, (__v4di)__B, + (__v4di)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shrdv_epi64(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_shrdv_epi64(__A, __B, __C), + (__v4di)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shrdv_epi64(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return 
(__m256i)__builtin_ia32_selectq_256(__U, + (__v4di)_mm256_shrdv_epi64(__A, __B, __C), + (__v4di)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shrdv_epi64(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshrdvq128((__v2di)__A, (__v2di)__B, + (__v2di)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shrdv_epi64(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_shrdv_epi64(__A, __B, __C), + (__v2di)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectq_128(__U, + (__v2di)_mm_shrdv_epi64(__A, __B, __C), + (__v2di)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shrdv_epi32(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshrdvd256((__v8si)__A, (__v8si)__B, + (__v8si)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shrdv_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_shrdv_epi32(__A, __B, __C), + (__v8si)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shrdv_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_shrdv_epi32(__A, __B, __C), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shrdv_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshrdvd128((__v4si)__A, (__v4si)__B, + (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shrdv_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_shrdv_epi32(__A, __B, __C), + (__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_shrdv_epi32(__A, __B, __C), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_shrdv_epi16(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_vpshrdvw256((__v16hi)__A, (__v16hi)__B, + (__v16hi)__C); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_shrdv_epi16(__m256i __A, __mmask16 __U, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_shrdv_epi16(__A, __B, __C), + (__v16hi)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_shrdv_epi16(__mmask16 __U, __m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)__builtin_ia32_selectw_256(__U, + (__v16hi)_mm256_shrdv_epi16(__A, __B, __C), + (__v16hi)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_shrdv_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpshrdvw128((__v8hi)__A, (__v8hi)__B, + (__v8hi)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_shrdv_epi16(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_shrdv_epi16(__A, __B, __C), + (__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_shrdv_epi16(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) +{ + return 
(__m128i)__builtin_ia32_selectw_128(__U, + (__v8hi)_mm_shrdv_epi16(__A, __B, __C), + (__v8hi)_mm_setzero_si128()); +} + + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/intel/clang/avx512vlvnniintrin.h b/third_party/intel/clang/avx512vlvnniintrin.h new file mode 100644 index 000000000..d1e5cd9d6 --- /dev/null +++ b/third_party/intel/clang/avx512vlvnniintrin.h @@ -0,0 +1,310 @@ +/*===------------- avx512vlvnniintrin.h - VNNI intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VLVNNIINTRIN_H +#define __AVX512VLVNNIINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512vnni,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512vnni,no-evex512"), \ + __min_vector_width__(256))) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with +/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a S, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSD instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3])) +/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endcode +#define _mm256_dpbusd_epi32(S, A, B) \ + ((__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B))) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with +/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a S using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSDS instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3])) +/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endcode +#define _mm256_dpbusds_epi32(S, A, B) \ + ((__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B))) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with +/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit +/// results. 
Sum these 2 results with the corresponding 32-bit integer in \a S, +/// and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSD instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j]) +/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1]) +/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endcode +#define _mm256_dpwssd_epi32(S, A, B) \ + ((__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B))) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with +/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a S +/// using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSDS instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j]) +/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1]) +/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endcode +#define _mm256_dpwssds_epi32(S, A, B) \ + ((__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B))) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with +/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a S, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSD instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3])) +/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endcode +#define _mm_dpbusd_epi32(S, A, B) \ + ((__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B))) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with +/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a S using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSDS instructions. 
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(A.byte[4*j+2]) * SignExtend16(B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(A.byte[4*j+3]) * SignExtend16(B.byte[4*j+3])) +/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endcode +#define _mm_dpbusds_epi32(S, A, B) \ + ((__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B))) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with +/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a S, +/// and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSD instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j]) +/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1]) +/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endcode +#define _mm_dpwssd_epi32(S, A, B) \ + ((__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B))) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with +/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a S +/// using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSDS instructions. 
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j]) +/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1]) +/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endcode +#define _mm_dpwssds_epi32(S, A, B) \ + ((__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_dpbusd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpbusd_epi32(__S, __A, __B), + (__v8si)__S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_dpbusd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpbusd_epi32(__S, __A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_dpbusds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpbusds_epi32(__S, __A, __B), + (__v8si)__S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_dpbusds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpbusds_epi32(__S, __A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_dpwssd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpwssd_epi32(__S, __A, __B), + (__v8si)__S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_dpwssd_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpwssd_epi32(__S, __A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_dpwssds_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpwssds_epi32(__S, __A, __B), + (__v8si)__S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_dpwssds_epi32(__mmask8 __U, __m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_selectd_256(__U, + (__v8si)_mm256_dpwssds_epi32(__S, __A, __B), + (__v8si)_mm256_setzero_si256()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_dpbusd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpbusd_epi32(__S, __A, __B), + (__v4si)__S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_dpbusd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpbusd_epi32(__S, __A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_dpbusds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpbusds_epi32(__S, __A, __B), + (__v4si)__S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_dpbusds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpbusds_epi32(__S, __A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 
+_mm_mask_dpwssd_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpwssd_epi32(__S, __A, __B), + (__v4si)__S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_dpwssd_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpwssd_epi32(__S, __A, __B), + (__v4si)_mm_setzero_si128()); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_dpwssds_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpwssds_epi32(__S, __A, __B), + (__v4si)__S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_dpwssds_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_selectd_128(__U, + (__v4si)_mm_dpwssds_epi32(__S, __A, __B), + (__v4si)_mm_setzero_si128()); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/intel/clang/avx512vlvp2intersectintrin.h b/third_party/intel/clang/avx512vlvp2intersectintrin.h new file mode 100644 index 000000000..63a31241a --- /dev/null +++ b/third_party/intel/clang/avx512vlvp2intersectintrin.h @@ -0,0 +1,123 @@ +/*===------ avx512vlvp2intersectintrin.h - VL VP2INTERSECT intrinsics ------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512VLVP2INTERSECT_H +#define _AVX512VLVP2INTERSECT_H + +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512vp2intersect,no-evex512"), \ + __min_vector_width__(128))) + +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vl,avx512vp2intersect,no-evex512"), \ + __min_vector_width__(256))) +/// Store, in an even/odd pair of mask registers, the indicators of the +/// locations of value matches between dwords in operands __a and __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VP2INTERSECTD instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. 
+/// \param __b +/// A 256-bit vector of [8 x i32] +/// \param __m0 +/// A pointer point to 8-bit mask +/// \param __m1 +/// A pointer point to 8-bit mask +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_2intersect_epi32(__m256i __a, __m256i __b, __mmask8 *__m0, __mmask8 *__m1) { + __builtin_ia32_vp2intersect_d_256((__v8si)__a, (__v8si)__b, __m0, __m1); +} + +/// Store, in an even/odd pair of mask registers, the indicators of the +/// locations of value matches between quadwords in operands __a and __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VP2INTERSECTQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x i64]. +/// \param __b +/// A 256-bit vector of [4 x i64] +/// \param __m0 +/// A pointer point to 8-bit mask +/// \param __m1 +/// A pointer point to 8-bit mask +static __inline__ void __DEFAULT_FN_ATTRS256 +_mm256_2intersect_epi64(__m256i __a, __m256i __b, __mmask8 *__m0, __mmask8 *__m1) { + __builtin_ia32_vp2intersect_q_256((__v4di)__a, (__v4di)__b, __m0, __m1); +} + +/// Store, in an even/odd pair of mask registers, the indicators of the +/// locations of value matches between dwords in operands __a and __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VP2INTERSECTD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. +/// \param __b +/// A 128-bit vector of [4 x i32] +/// \param __m0 +/// A pointer point to 8-bit mask +/// \param __m1 +/// A pointer point to 8-bit mask +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_2intersect_epi32(__m128i __a, __m128i __b, __mmask8 *__m0, __mmask8 *__m1) { + __builtin_ia32_vp2intersect_d_128((__v4si)__a, (__v4si)__b, __m0, __m1); +} + +/// Store, in an even/odd pair of mask registers, the indicators of the +/// locations of value matches between quadwords in operands __a and __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VP2INTERSECTQ instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. +/// \param __b +/// A 128-bit vector of [2 x i64] +/// \param __m0 +/// A pointer point to 8-bit mask +/// \param __m1 +/// A pointer point to 8-bit mask +static __inline__ void __DEFAULT_FN_ATTRS128 +_mm_2intersect_epi64(__m128i __a, __m128i __b, __mmask8 *__m0, __mmask8 *__m1) { + __builtin_ia32_vp2intersect_q_128((__v2di)__a, (__v2di)__b, __m0, __m1); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/intel/clang/avx512vnniintrin.h b/third_party/intel/clang/avx512vnniintrin.h new file mode 100644 index 000000000..0fb381a12 --- /dev/null +++ b/third_party/intel/clang/avx512vnniintrin.h @@ -0,0 +1,116 @@ +/*===------------- avx512vnniintrin.h - VNNI intrinsics ------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVX512VNNIINTRIN_H +#define __AVX512VNNIINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vnni,evex512"), __min_vector_width__(512))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_dpbusd_epi32(__m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpdpbusd512((__v16si)__S, (__v16si)__A, + (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_dpbusd_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpbusd_epi32(__S, __A, __B), + (__v16si)__S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_dpbusd_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpbusd_epi32(__S, __A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_dpbusds_epi32(__m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpdpbusds512((__v16si)__S, (__v16si)__A, + (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_dpbusds_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpbusds_epi32(__S, __A, __B), + (__v16si)__S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_dpbusds_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpbusds_epi32(__S, __A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_dpwssd_epi32(__m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpdpwssd512((__v16si)__S, (__v16si)__A, + (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_dpwssd_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpwssd_epi32(__S, __A, __B), + (__v16si)__S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_dpwssd_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpwssd_epi32(__S, __A, __B), + (__v16si)_mm512_setzero_si512()); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_dpwssds_epi32(__m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_vpdpwssds512((__v16si)__S, (__v16si)__A, + (__v16si)__B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_dpwssds_epi32(__m512i __S, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpwssds_epi32(__S, __A, __B), + (__v16si)__S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_dpwssds_epi32(__mmask16 __U, __m512i __S, __m512i __A, __m512i __B) +{ + return (__m512i)__builtin_ia32_selectd_512(__U, + (__v16si)_mm512_dpwssds_epi32(__S, __A, __B), + (__v16si)_mm512_setzero_si512()); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/avx512vp2intersectintrin.h b/third_party/intel/clang/avx512vp2intersectintrin.h new file mode 100644 index 000000000..16552cae3 --- /dev/null +++ b/third_party/intel/clang/avx512vp2intersectintrin.h @@ -0,0 +1,78 @@ +/*===------- avx512vpintersectintrin.h - VP2INTERSECT intrinsics ------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation 
files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef _AVX512VP2INTERSECT_H +#define _AVX512VP2INTERSECT_H + +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vp2intersect,evex512"), \ + __min_vector_width__(512))) + +/// Store, in an even/odd pair of mask registers, the indicators of the +/// locations of value matches between dwords in operands __a and __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VP2INTERSECTD instruction. +/// +/// \param __a +/// A 512-bit vector of [16 x i32]. +/// \param __b +/// A 512-bit vector of [16 x i32] +/// \param __m0 +/// A pointer point to 16-bit mask +/// \param __m1 +/// A pointer point to 16-bit mask +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_2intersect_epi32(__m512i __a, __m512i __b, __mmask16 *__m0, __mmask16 *__m1) { + __builtin_ia32_vp2intersect_d_512((__v16si)__a, (__v16si)__b, __m0, __m1); +} + +/// Store, in an even/odd pair of mask registers, the indicators of the +/// locations of value matches between quadwords in operands __a and __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VP2INTERSECTQ instruction. +/// +/// \param __a +/// A 512-bit vector of [8 x i64]. +/// \param __b +/// A 512-bit vector of [8 x i64] +/// \param __m0 +/// A pointer point to 8-bit mask +/// \param __m1 +/// A pointer point to 8-bit mask +static __inline__ void __DEFAULT_FN_ATTRS +_mm512_2intersect_epi64(__m512i __a, __m512i __b, __mmask8 *__m0, __mmask8 *__m1) { + __builtin_ia32_vp2intersect_q_512((__v8di)__a, (__v8di)__b, __m0, __m1); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/avx512vpopcntdqintrin.h b/third_party/intel/clang/avx512vpopcntdqintrin.h new file mode 100644 index 000000000..e73e7e4f7 --- /dev/null +++ b/third_party/intel/clang/avx512vpopcntdqintrin.h @@ -0,0 +1,56 @@ +/*===----- avx512vpopcntdqintrin.h - AVX512VPOPCNTDQ intrinsics-------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error \ + "Never use directly; include instead." 
+#endif + +#ifndef __AVX512VPOPCNTDQINTRIN_H +#define __AVX512VPOPCNTDQINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vpopcntdq,evex512"), \ + __min_vector_width__(512))) + +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_popcnt_epi64(__m512i __A) { + return (__m512i)__builtin_ia32_vpopcntq_512((__v8di)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_popcnt_epi64(__m512i __W, __mmask8 __U, __m512i __A) { + return (__m512i)__builtin_ia32_selectq_512( + (__mmask8)__U, (__v8di)_mm512_popcnt_epi64(__A), (__v8di)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_popcnt_epi64(__mmask8 __U, __m512i __A) { + return _mm512_mask_popcnt_epi64((__m512i)_mm512_setzero_si512(), __U, __A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_popcnt_epi32(__m512i __A) { + return (__m512i)__builtin_ia32_vpopcntd_512((__v16si)__A); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_popcnt_epi32(__m512i __W, __mmask16 __U, __m512i __A) { + return (__m512i)__builtin_ia32_selectd_512( + (__mmask16)__U, (__v16si)_mm512_popcnt_epi32(__A), (__v16si)__W); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_popcnt_epi32(__mmask16 __U, __m512i __A) { + return _mm512_mask_popcnt_epi32((__m512i)_mm512_setzero_si512(), __U, __A); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/avx512vpopcntdqvlintrin.h b/third_party/intel/clang/avx512vpopcntdqvlintrin.h new file mode 100644 index 000000000..b2df2e84d --- /dev/null +++ b/third_party/intel/clang/avx512vpopcntdqvlintrin.h @@ -0,0 +1,95 @@ +/*===---- avx512vpopcntdqintrin.h - AVX512VPOPCNTDQ intrinsics -------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error \ + "Never use directly; include instead." +#endif + +#ifndef __AVX512VPOPCNTDQVLINTRIN_H +#define __AVX512VPOPCNTDQVLINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vpopcntdq,avx512vl,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512vpopcntdq,avx512vl,no-evex512"), \ + __min_vector_width__(256))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_popcnt_epi64(__m128i __A) { + return (__m128i)__builtin_ia32_vpopcntq_128((__v2di)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_popcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectq_128( + (__mmask8)__U, (__v2di)_mm_popcnt_epi64(__A), (__v2di)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_popcnt_epi64(__mmask8 __U, __m128i __A) { + return _mm_mask_popcnt_epi64((__m128i)_mm_setzero_si128(), __U, __A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_popcnt_epi32(__m128i __A) { + return (__m128i)__builtin_ia32_vpopcntd_128((__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_popcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) { + return (__m128i)__builtin_ia32_selectd_128( + (__mmask8)__U, (__v4si)_mm_popcnt_epi32(__A), (__v4si)__W); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_popcnt_epi32(__mmask8 __U, __m128i __A) { + return _mm_mask_popcnt_epi32((__m128i)_mm_setzero_si128(), __U, __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_popcnt_epi64(__m256i __A) { + return (__m256i)__builtin_ia32_vpopcntq_256((__v4di)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_popcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectq_256( + (__mmask8)__U, (__v4di)_mm256_popcnt_epi64(__A), (__v4di)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_popcnt_epi64(__mmask8 __U, __m256i __A) { + return _mm256_mask_popcnt_epi64((__m256i)_mm256_setzero_si256(), __U, __A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_popcnt_epi32(__m256i __A) { + return (__m256i)__builtin_ia32_vpopcntd_256((__v8si)__A); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_popcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) { + return (__m256i)__builtin_ia32_selectd_256( + (__mmask8)__U, (__v8si)_mm256_popcnt_epi32(__A), (__v8si)__W); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_popcnt_epi32(__mmask8 __U, __m256i __A) { + return _mm256_mask_popcnt_epi32((__m256i)_mm256_setzero_si256(), __U, __A); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif diff --git a/third_party/intel/clang/avxifmaintrin.h b/third_party/intel/clang/avxifmaintrin.h new file mode 100644 index 000000000..5c782d2a5 --- /dev/null +++ b/third_party/intel/clang/avxifmaintrin.h @@ -0,0 +1,177 @@ +/*===----------------- avxifmaintrin.h - IFMA intrinsics -------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVXIFMAINTRIN_H +#define __AVXIFMAINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxifma"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxifma"), \ + __min_vector_width__(256))) + +// must vex-encoding + +/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y +/// and \a __Z to form a 104-bit intermediate result. Add the high 52-bit +/// unsigned integer from the intermediate result with the corresponding +/// unsigned 64-bit integer in \a __X, and store the results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i +/// _mm_madd52hi_avx_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPMADD52HUQ instruction. +/// +/// \return +/// return __m128i dst. +/// \param __X +/// A 128-bit vector of [2 x i64] +/// \param __Y +/// A 128-bit vector of [2 x i64] +/// \param __Z +/// A 128-bit vector of [2 x i64] +/// +/// \code{.operation} +/// FOR j := 0 to 1 +/// i := j*64 +/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) +/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_madd52hi_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) { + return (__m128i)__builtin_ia32_vpmadd52huq128((__v2di)__X, (__v2di)__Y, + (__v2di)__Z); +} + +/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y +/// and \a __Z to form a 104-bit intermediate result. Add the high 52-bit +/// unsigned integer from the intermediate result with the corresponding +/// unsigned 64-bit integer in \a __X, and store the results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i +/// _mm256_madd52hi_avx_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPMADD52HUQ instruction. +/// +/// \return +/// return __m256i dst. +/// \param __X +/// A 256-bit vector of [4 x i64] +/// \param __Y +/// A 256-bit vector of [4 x i64] +/// \param __Z +/// A 256-bit vector of [4 x i64] +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// i := j*64 +/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) +/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd52hi_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) { + return (__m256i)__builtin_ia32_vpmadd52huq256((__v4di)__X, (__v4di)__Y, + (__v4di)__Z); +} + +/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y +/// and \a __Z to form a 104-bit intermediate result. Add the low 52-bit +/// unsigned integer from the intermediate result with the corresponding +/// unsigned 64-bit integer in \a __X, and store the results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i +/// _mm_madd52lo_avx_epu64 (__m128i __X, __m128i __Y, __m128i __Z) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPMADD52LUQ instruction. +/// +/// \return +/// return __m128i dst. 
+/// \param __X +/// A 128-bit vector of [2 x i64] +/// \param __Y +/// A 128-bit vector of [2 x i64] +/// \param __Z +/// A 128-bit vector of [2 x i64] +/// +/// \code{.operation} +/// FOR j := 0 to 1 +/// i := j*64 +/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) +/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_madd52lo_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) { + return (__m128i)__builtin_ia32_vpmadd52luq128((__v2di)__X, (__v2di)__Y, + (__v2di)__Z); +} + +/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y +/// and \a __Z to form a 104-bit intermediate result. Add the low 52-bit +/// unsigned integer from the intermediate result with the corresponding +/// unsigned 64-bit integer in \a __X, and store the results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i +/// _mm256_madd52lo_avx_epu64 (__m256i __X, __m256i __Y, __m256i __Z) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPMADD52LUQ instruction. +/// +/// \return +/// return __m256i dst. +/// \param __X +/// A 256-bit vector of [4 x i64] +/// \param __Y +/// A 256-bit vector of [4 x i64] +/// \param __Z +/// A 256-bit vector of [4 x i64] +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// i := j*64 +/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i]) +/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_madd52lo_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) { + return (__m256i)__builtin_ia32_vpmadd52luq256((__v4di)__X, (__v4di)__Y, + (__v4di)__Z); +} +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif // __AVXIFMAINTRIN_H diff --git a/third_party/intel/clang/avxintrin.h b/third_party/intel/clang/avxintrin.h new file mode 100644 index 000000000..4983f3311 --- /dev/null +++ b/third_party/intel/clang/avxintrin.h @@ -0,0 +1,5126 @@ +/*===---- avxintrin.h - AVX intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVXINTRIN_H +#define __AVXINTRIN_H + +typedef double __v4df __attribute__ ((__vector_size__ (32))); +typedef float __v8sf __attribute__ ((__vector_size__ (32))); +typedef long long __v4di __attribute__ ((__vector_size__ (32))); +typedef int __v8si __attribute__ ((__vector_size__ (32))); +typedef short __v16hi __attribute__ ((__vector_size__ (32))); +typedef char __v32qi __attribute__ ((__vector_size__ (32))); + +/* Unsigned types */ +typedef unsigned long long __v4du __attribute__ ((__vector_size__ (32))); +typedef unsigned int __v8su __attribute__ ((__vector_size__ (32))); +typedef unsigned short __v16hu __attribute__ ((__vector_size__ (32))); +typedef unsigned char __v32qu __attribute__ ((__vector_size__ (32))); + +/* We need an explicitly signed variant for char. Note that this shouldn't + * appear in the interface though. 
*/ +typedef signed char __v32qs __attribute__((__vector_size__(32))); + +typedef float __m256 __attribute__ ((__vector_size__ (32), __aligned__(32))); +typedef double __m256d __attribute__((__vector_size__(32), __aligned__(32))); +typedef long long __m256i __attribute__((__vector_size__(32), __aligned__(32))); + +typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1))); +typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1))); +typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1))); + +#ifdef __SSE2__ +/* Both _Float16 and __bf16 require SSE2 being enabled. */ +typedef _Float16 __v16hf __attribute__((__vector_size__(32), __aligned__(32))); +typedef _Float16 __m256h __attribute__((__vector_size__(32), __aligned__(32))); +typedef _Float16 __m256h_u __attribute__((__vector_size__(32), __aligned__(1))); + +typedef __bf16 __v16bf __attribute__((__vector_size__(32), __aligned__(32))); +typedef __bf16 __m256bh __attribute__((__vector_size__(32), __aligned__(32))); +#endif + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("avx,no-evex512"), \ + __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avx,no-evex512"), \ + __min_vector_width__(128))) + +/* Arithmetic */ +/// Adds two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the sums of both +/// operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_add_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a+(__v4df)__b); +} + +/// Adds two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the sums of both +/// operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_add_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a+(__v8sf)__b); +} + +/// Subtracts two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the minuend. +/// \param __b +/// A 256-bit vector of [4 x double] containing the subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the differences between +/// both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_sub_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a-(__v4df)__b); +} + +/// Subtracts two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the minuend. +/// \param __b +/// A 256-bit vector of [8 x float] containing the subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the differences between +/// both operands. 
+static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_sub_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a-(__v8sf)__b); +} + +/// Adds the even-indexed values and subtracts the odd-indexed values of +/// two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the left source operand. +/// \param __b +/// A 256-bit vector of [4 x double] containing the right source operand. +/// \returns A 256-bit vector of [4 x double] containing the alternating sums +/// and differences between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_addsub_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_addsubpd256((__v4df)__a, (__v4df)__b); +} + +/// Adds the even-indexed values and subtracts the odd-indexed values of +/// two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the left source operand. +/// \param __b +/// A 256-bit vector of [8 x float] containing the right source operand. +/// \returns A 256-bit vector of [8 x float] containing the alternating sums and +/// differences between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_addsub_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_addsubps256((__v8sf)__a, (__v8sf)__b); +} + +/// Divides two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the dividend. +/// \param __b +/// A 256-bit vector of [4 x double] containing the divisor. +/// \returns A 256-bit vector of [4 x double] containing the quotients of both +/// operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_div_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a/(__v4df)__b); +} + +/// Divides two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the dividend. +/// \param __b +/// A 256-bit vector of [8 x float] containing the divisor. +/// \returns A 256-bit vector of [8 x float] containing the quotients of both +/// operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_div_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a/(__v8sf)__b); +} + +/// Compares two 256-bit vectors of [4 x double] and returns the greater +/// of each pair of values. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \returns A 256-bit vector of [4 x double] containing the maximum values +/// between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_max_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_maxpd256((__v4df)__a, (__v4df)__b); +} + +/// Compares two 256-bit vectors of [8 x float] and returns the greater +/// of each pair of values. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPS instruction. 
+/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \returns A 256-bit vector of [8 x float] containing the maximum values +/// between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_max_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_maxps256((__v8sf)__a, (__v8sf)__b); +} + +/// Compares two 256-bit vectors of [4 x double] and returns the lesser +/// of each pair of values. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \returns A 256-bit vector of [4 x double] containing the minimum values +/// between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_min_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_minpd256((__v4df)__a, (__v4df)__b); +} + +/// Compares two 256-bit vectors of [8 x float] and returns the lesser +/// of each pair of values. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \returns A 256-bit vector of [8 x float] containing the minimum values +/// between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_min_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_minps256((__v8sf)__a, (__v8sf)__b); +} + +/// Multiplies two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the operands. +/// \returns A 256-bit vector of [4 x double] containing the products of both +/// operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_mul_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4df)__a * (__v4df)__b); +} + +/// Multiplies two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the operands. +/// \returns A 256-bit vector of [8 x float] containing the products of both +/// operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_mul_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8sf)__a * (__v8sf)__b); +} + +/// Calculates the square roots of the values in a 256-bit vector of +/// [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the square roots of the +/// values in the operand. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_sqrt_pd(__m256d __a) +{ + return (__m256d)__builtin_ia32_sqrtpd256((__v4df)__a); +} + +/// Calculates the square roots of the values in a 256-bit vector of +/// [8 x float]. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the square roots of the +/// values in the operand. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_sqrt_ps(__m256 __a) +{ + return (__m256)__builtin_ia32_sqrtps256((__v8sf)__a); +} + +/// Calculates the reciprocal square roots of the values in a 256-bit +/// vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRSQRTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the reciprocal square +/// roots of the values in the operand. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_rsqrt_ps(__m256 __a) +{ + return (__m256)__builtin_ia32_rsqrtps256((__v8sf)__a); +} + +/// Calculates the reciprocals of the values in a 256-bit vector of +/// [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRCPPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the reciprocals of the +/// values in the operand. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_rcp_ps(__m256 __a) +{ + return (__m256)__builtin_ia32_rcpps256((__v8sf)__a); +} + +/// Rounds the values in a 256-bit vector of [4 x double] as specified +/// by the byte operand. The source values are rounded to integer values and +/// returned as 64-bit double-precision floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_round_pd(__m256d V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used. \n +/// 1: The PE field is not updated. \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M. \n +/// 1: Use the current MXCSR setting. \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest. \n +/// 01: Downward (toward negative infinity). \n +/// 10: Upward (toward positive infinity). \n +/// 11: Truncated. +/// \returns A 256-bit vector of [4 x double] containing the rounded values. +#define _mm256_round_pd(V, M) \ + ((__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M))) + +/// Rounds the values stored in a 256-bit vector of [8 x float] as +/// specified by the byte operand. The source values are rounded to integer +/// values and returned as floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_round_ps(__m256 V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used. \n +/// 1: The PE field is not updated. \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M. \n +/// 1: Use the current MXCSR setting. \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest. \n +/// 01: Downward (toward negative infinity). \n +/// 10: Upward (toward positive infinity). \n +/// 11: Truncated. 
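+
+/* The rounding-control bits above are normally spelled with the _MM_FROUND_*
+   macros rather than raw values; a sketch, assuming AVX is enabled:
+
+     __m256d v = _mm256_set_pd(2.5, -1.5, 1.5, -2.5);  // elements {-2.5, 1.5, -1.5, 2.5}
+     _mm256_round_pd(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+                                  // nearest-even: {-2.0, 2.0, -2.0, 2.0}
+     _mm256_round_pd(v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+                                  // truncated:    {-2.0, 1.0, -1.0, 2.0}
+
+   _mm256_ceil_pd/_mm256_floor_pd below are shorthands for _MM_FROUND_CEIL and
+   _MM_FROUND_FLOOR. */
+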
+/// \returns A 256-bit vector of [8 x float] containing the rounded values. +#define _mm256_round_ps(V, M) \ + ((__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M))) + +/// Rounds up the values stored in a 256-bit vector of [4 x double]. The +/// source values are rounded up to integer values and returned as 64-bit +/// double-precision floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_ceil_pd(__m256d V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the rounded up values. +#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL) + +/// Rounds down the values stored in a 256-bit vector of [4 x double]. +/// The source values are rounded down to integer values and returned as +/// 64-bit double-precision floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_floor_pd(__m256d V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the rounded down +/// values. +#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR) + +/// Rounds up the values stored in a 256-bit vector of [8 x float]. The +/// source values are rounded up to integer values and returned as +/// floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_ceil_ps(__m256 V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the rounded up values. +#define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL) + +/// Rounds down the values stored in a 256-bit vector of [8 x float]. The +/// source values are rounded down to integer values and returned as +/// floating-point values. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_floor_ps(__m256 V); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the rounded down values. +#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR) + +/* Logical */ +/// Performs a bitwise AND of two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the +/// values between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_and_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4du)__a & (__v4du)__b); +} + +/// Performs a bitwise AND of two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the +/// values between both operands. 
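+
+/* Illustrative use of the bitwise intrinsics in this section, assuming AVX is
+   enabled: since _mm256_andnot_pd() computes (~a & b), a mask whose only set
+   bits are the sign bits takes absolute values without a compare or branch.
+
+     __m256d x         = _mm256_set_pd(-1.5, 2.0, -3.0, 4.0);
+     __m256d sign_bits = _mm256_set1_pd(-0.0);            // 0x8000... per lane
+     __m256d magnitude = _mm256_andnot_pd(sign_bits, x);  // {4.0, 3.0, 2.0, 1.5}
+
+   The same pattern works for [8 x float] with _mm256_set1_ps(-0.0f) and
+   _mm256_andnot_ps(). */
+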
+static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_and_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8su)__a & (__v8su)__b); +} + +/// Performs a bitwise AND of two 256-bit vectors of [4 x double], using +/// the one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDNPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the left source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 256-bit vector of [4 x double] containing the right source operand. +/// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the +/// values of the second operand and the one's complement of the first +/// operand. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_andnot_pd(__m256d __a, __m256d __b) +{ + return (__m256d)(~(__v4du)__a & (__v4du)__b); +} + +/// Performs a bitwise AND of two 256-bit vectors of [8 x float], using +/// the one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDNPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the left source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 256-bit vector of [8 x float] containing the right source operand. +/// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the +/// values of the second operand and the one's complement of the first +/// operand. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_andnot_ps(__m256 __a, __m256 __b) +{ + return (__m256)(~(__v8su)__a & (__v8su)__b); +} + +/// Performs a bitwise OR of two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VORPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the bitwise OR of the +/// values between both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_or_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4du)__a | (__v4du)__b); +} + +/// Performs a bitwise OR of two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VORPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the bitwise OR of the +/// values between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_or_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8su)__a | (__v8su)__b); +} + +/// Performs a bitwise XOR of two 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// \returns A 256-bit vector of [4 x double] containing the bitwise XOR of the +/// values between both operands. 
+static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_xor_pd(__m256d __a, __m256d __b) +{ + return (__m256d)((__v4du)__a ^ (__v4du)__b); +} + +/// Performs a bitwise XOR of two 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// \returns A 256-bit vector of [8 x float] containing the bitwise XOR of the +/// values between both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_xor_ps(__m256 __a, __m256 __b) +{ + return (__m256)((__v8su)__a ^ (__v8su)__b); +} + +/* Horizontal arithmetic */ +/// Horizontally adds the adjacent pairs of values contained in two +/// 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal sums of the values are returned in the even-indexed +/// elements of a vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal sums of the values are returned in the odd-indexed +/// elements of a vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the horizontal sums of +/// both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_hadd_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in two +/// 256-bit vectors of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal sums of the values are returned in the elements with +/// index 0, 1, 4, 5 of a vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal sums of the values are returned in the elements with +/// index 2, 3, 6, 7 of a vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the horizontal sums of +/// both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_hadd_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in two +/// 256-bit vectors of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// even-indexed elements of a vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// odd-indexed elements of a vector of [4 x double]. +/// \returns A 256-bit vector of [4 x double] containing the horizontal +/// differences of both operands. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_hsub_pd(__m256d __a, __m256d __b) +{ + return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in two +/// 256-bit vectors of [8 x float]. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// elements with index 0, 1, 4, 5 of a vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float] containing one of the source operands. +/// The horizontal differences between the values are returned in the +/// elements with index 2, 3, 6, 7 of a vector of [8 x float]. +/// \returns A 256-bit vector of [8 x float] containing the horizontal +/// differences of both operands. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_hsub_ps(__m256 __a, __m256 __b) +{ + return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b); +} + +/* Vector permutations */ +/// Copies the values in a 128-bit vector of [2 x double] as specified +/// by the 128-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __c +/// A 128-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [65]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. +/// \returns A 128-bit vector of [2 x double] containing the copied values. +static __inline __m128d __DEFAULT_FN_ATTRS128 +_mm_permutevar_pd(__m128d __a, __m128i __c) +{ + return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c); +} + +/// Copies the values in a 256-bit vector of [4 x double] as specified +/// by the 256-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __c +/// A 256-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [65]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// Bit [129]: \n +/// 0: Bits [191:128] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// Bit [193]: \n +/// 0: Bits [191:128] of the source are copied to bits [255:192] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [255:192] of the +/// returned vector. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_permutevar_pd(__m256d __a, __m256i __c) +{ + return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)__a, (__v4di)__c); +} + +/// Copies the values stored in a 128-bit vector of [4 x float] as +/// specified by the 128-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS instruction. 
+/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __c +/// A 128-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [33:32]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [65:64]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [97:96]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. +/// \returns A 128-bit vector of [4 x float] containing the copied values. +static __inline __m128 __DEFAULT_FN_ATTRS128 +_mm_permutevar_ps(__m128 __a, __m128i __c) +{ + return (__m128)__builtin_ia32_vpermilvarps((__v4sf)__a, (__v4si)__c); +} + +/// Copies the values stored in a 256-bit vector of [8 x float] as +/// specified by the 256-bit integer vector operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __c +/// A 256-bit integer vector operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [33:32]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [65:64]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. 
\n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [97:96]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// Bits [129:128]: \n +/// 00: Bits [159:128] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// Bits [161:160]: \n +/// 00: Bits [159:128] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// Bits [193:192]: \n +/// 00: Bits [159:128] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// Bits [225:224]: \n +/// 00: Bits [159:128] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [255:224] of the +/// returned vector. +/// \returns A 256-bit vector of [8 x float] containing the copied values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_permutevar_ps(__m256 __a, __m256i __c) +{ + return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c); +} + +/// Copies the values in a 128-bit vector of [2 x double] as specified +/// by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_permute_pd(__m128d A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param A +/// A 128-bit vector of [2 x double]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bit [0]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. +/// \returns A 128-bit vector of [2 x double] containing the copied values. 
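+
+/* Worked example for the immediate-operand permutes defined below, assuming
+   AVX is enabled. The control bits only select within each 128-bit lane, so
+   these forms cannot move data across the middle of a 256-bit vector:
+
+     __m128d lo   = _mm_set_pd(2.0, 1.0);                // elements {1, 2}
+     __m128d swap = _mm_permute_pd(lo, 0x1);             // elements {2, 1}
+
+     __m256d v = _mm256_set_pd(4.0, 3.0, 2.0, 1.0);      // elements {1, 2, 3, 4}
+     __m256d w = _mm256_permute_pd(v, 0x5);              // elements {2, 1, 4, 3}
+
+   Crossing the 128-bit boundary requires _mm256_permute2f128_pd(), defined
+   further below. */
+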
+#define _mm_permute_pd(A, C) \ + ((__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C))) + +/// Copies the values in a 256-bit vector of [4 x double] as specified by +/// the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_permute_pd(__m256d A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPD instruction. +/// +/// \param A +/// A 256-bit vector of [4 x double]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bit [0]: \n +/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned +/// vector. \n +/// 1: Bits [127:64] of the source are copied to bits [63:0] of the +/// returned vector. \n +/// Bit [1]: \n +/// 0: Bits [63:0] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// 1: Bits [127:64] of the source are copied to bits [127:64] of the +/// returned vector. \n +/// Bit [2]: \n +/// 0: Bits [191:128] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [191:128] of the +/// returned vector. \n +/// Bit [3]: \n +/// 0: Bits [191:128] of the source are copied to bits [255:192] of the +/// returned vector. \n +/// 1: Bits [255:192] of the source are copied to bits [255:192] of the +/// returned vector. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +#define _mm256_permute_pd(A, C) \ + ((__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(A), (int)(C))) + +/// Copies the values in a 128-bit vector of [4 x float] as specified by +/// the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_permute_ps(__m128 A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param A +/// A 128-bit vector of [4 x float]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [3:2]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [5:4]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [7:6]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. 
\n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. +/// \returns A 128-bit vector of [4 x float] containing the copied values. +#define _mm_permute_ps(A, C) \ + ((__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(A), (int)(C))) + +/// Copies the values in a 256-bit vector of [8 x float] as specified by +/// the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_permute_ps(__m256 A, const int C); +/// \endcode +/// +/// This intrinsic corresponds to the VPERMILPS instruction. +/// +/// \param A +/// A 256-bit vector of [8 x float]. +/// \param C +/// An immediate integer operand specifying how the values are to be +/// copied. \n +/// Bits [1:0]: \n +/// 00: Bits [31:0] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [31:0] of the +/// returned vector. \n +/// Bits [3:2]: \n +/// 00: Bits [31:0] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [63:32] of the +/// returned vector. \n +/// Bits [5:4]: \n +/// 00: Bits [31:0] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [95:64] of the +/// returned vector. \n +/// Bits [7:6]: \n +/// 00: Bits [31:0] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 01: Bits [63:32] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 10: Bits [95:64] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// 11: Bits [127:96] of the source are copied to bits [127:96] of the +/// returned vector. \n +/// Bits [1:0]: \n +/// 00: Bits [159:128] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [159:128] of the +/// returned vector. \n +/// Bits [3:2]: \n +/// 00: Bits [159:128] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [191:160] of the +/// returned vector. \n +/// Bits [5:4]: \n +/// 00: Bits [159:128] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [223:192] of the +/// returned vector. 
\n +/// 11: Bits [255:224] of the source are copied to bits [223:192] of the +/// returned vector. \n +/// Bits [7:6]: \n +/// 00: Bits [159:128] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 01: Bits [191:160] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 10: Bits [223:192] of the source are copied to bits [255:224] of the +/// returned vector. \n +/// 11: Bits [255:224] of the source are copied to bits [255:224] of the +/// returned vector. +/// \returns A 256-bit vector of [8 x float] containing the copied values. +#define _mm256_permute_ps(A, C) \ + ((__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(A), (int)(C))) + +/// Permutes 128-bit data values stored in two 256-bit vectors of +/// [4 x double], as specified by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_permute2f128_pd(__m256d V1, __m256d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPERM2F128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [4 x double]. +/// \param V2 +/// A 256-bit vector of [4 x double. +/// \param M +/// An immediate integer operand specifying how the values are to be +/// permuted. \n +/// Bits [1:0]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// Bits [5:4]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +#define _mm256_permute2f128_pd(V1, V2, M) \ + ((__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \ + (__v4df)(__m256d)(V2), (int)(M))) + +/// Permutes 128-bit data values stored in two 256-bit vectors of +/// [8 x float], as specified by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_permute2f128_ps(__m256 V1, __m256 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPERM2F128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [8 x float]. +/// \param V2 +/// A 256-bit vector of [8 x float]. +/// \param M +/// An immediate integer operand specifying how the values are to be +/// permuted. \n +/// Bits [1:0]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// Bits [5:4]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the +/// destination. 
\n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the +/// destination. +/// \returns A 256-bit vector of [8 x float] containing the copied values. +#define _mm256_permute2f128_ps(V1, V2, M) \ + ((__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \ + (__v8sf)(__m256)(V2), (int)(M))) + +/// Permutes 128-bit data values stored in two 256-bit integer vectors, +/// as specified by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_permute2f128_si256(__m256i V1, __m256i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPERM2F128 instruction. +/// +/// \param V1 +/// A 256-bit integer vector. +/// \param V2 +/// A 256-bit integer vector. +/// \param M +/// An immediate integer operand specifying how the values are to be copied. +/// Bits [1:0]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the +/// destination. \n +/// Bits [5:4]: \n +/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the +/// destination. \n +/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the +/// destination. \n +/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the +/// destination. +/// \returns A 256-bit integer vector containing the copied values. +#define _mm256_permute2f128_si256(V1, V2, M) \ + ((__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \ + (__v8si)(__m256i)(V2), (int)(M))) + +/* Vector Blend */ +/// Merges 64-bit double-precision data values stored in either of the +/// two 256-bit vectors of [4 x double], as specified by the immediate +/// integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPD instruction. +/// +/// \param V1 +/// A 256-bit vector of [4 x double]. +/// \param V2 +/// A 256-bit vector of [4 x double]. +/// \param M +/// An immediate integer operand, with mask bits [3:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 64-bit +/// element in operand \a V1 is copied to the same position in the +/// destination. When a mask bit is 1, the corresponding 64-bit element in +/// operand \a V2 is copied to the same position in the destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +#define _mm256_blend_pd(V1, V2, M) \ + ((__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \ + (__v4df)(__m256d)(V2), (int)(M))) + +/// Merges 32-bit single-precision data values stored in either of the +/// two 256-bit vectors of [8 x float], as specified by the immediate +/// integer operand. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_blend_ps(__m256 V1, __m256 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPS instruction. +/// +/// \param V1 +/// A 256-bit vector of [8 x float]. +/// \param V2 +/// A 256-bit vector of [8 x float]. 
+/// \param M +/// An immediate integer operand, with mask bits [7:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 32-bit +/// element in operand \a V1 is copied to the same position in the +/// destination. When a mask bit is 1, the corresponding 32-bit element in +/// operand \a V2 is copied to the same position in the destination. +/// \returns A 256-bit vector of [8 x float] containing the copied values. +#define _mm256_blend_ps(V1, V2, M) \ + ((__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(V1), \ + (__v8sf)(__m256)(V2), (int)(M))) + +/// Merges 64-bit double-precision data values stored in either of the +/// two 256-bit vectors of [4 x double], as specified by the 256-bit vector +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \param __c +/// A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying +/// how the values are to be copied. The position of the mask bit corresponds +/// to the most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 64-bit element in operand \a __a is copied to the same +/// position in the destination. When a mask bit is 1, the corresponding +/// 64-bit element in operand \a __b is copied to the same position in the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the copied values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c) +{ + return (__m256d)__builtin_ia32_blendvpd256( + (__v4df)__a, (__v4df)__b, (__v4df)__c); +} + +/// Merges 32-bit single-precision data values stored in either of the +/// two 256-bit vectors of [8 x float], as specified by the 256-bit vector +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \param __c +/// A 256-bit vector operand, with mask bits 255, 223, 191, 159, 127, 95, 63, +/// and 31 specifying how the values are to be copied. The position of the +/// mask bit corresponds to the most significant bit of a copied value. When +/// a mask bit is 0, the corresponding 32-bit element in operand \a __a is +/// copied to the same position in the destination. When a mask bit is 1, the +/// corresponding 32-bit element in operand \a __b is copied to the same +/// position in the destination. +/// \returns A 256-bit vector of [8 x float] containing the copied values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) +{ + return (__m256)__builtin_ia32_blendvps256( + (__v8sf)__a, (__v8sf)__b, (__v8sf)__c); +} + +/* Vector Dot Product */ +/// Computes two dot products in parallel, using the lower and upper +/// halves of two [8 x float] vectors as input to the two computations, and +/// returning the two dot products in the lower and upper halves of the +/// [8 x float] result. +/// +/// The immediate integer operand controls which input elements will +/// contribute to the dot product, and where the final results are returned. 
+/// In general, for each dot product, the four corresponding elements of the +/// input vectors are multiplied; the first two and second two products are +/// summed, then the two sums are added to form the final result. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_dp_ps(__m256 V1, __m256 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VDPPS instruction. +/// +/// \param V1 +/// A vector of [8 x float] values, treated as two [4 x float] vectors. +/// \param V2 +/// A vector of [8 x float] values, treated as two [4 x float] vectors. +/// \param M +/// An immediate integer argument. Bits [7:4] determine which elements of +/// the input vectors are used, with bit [4] corresponding to the lowest +/// element and bit [7] corresponding to the highest element of each [4 x +/// float] subvector. If a bit is set, the corresponding elements from the +/// two input vectors are used as an input for dot product; otherwise that +/// input is treated as zero. Bits [3:0] determine which elements of the +/// result will receive a copy of the final dot product, with bit [0] +/// corresponding to the lowest element and bit [3] corresponding to the +/// highest element of each [4 x float] subvector. If a bit is set, the dot +/// product is returned in the corresponding element; otherwise that element +/// is set to zero. The bitmask is applied in the same way to each of the +/// two parallel dot product computations. +/// \returns A 256-bit vector of [8 x float] containing the two dot products. +#define _mm256_dp_ps(V1, V2, M) \ + ((__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \ + (__v8sf)(__m256)(V2), (M))) + +/* Vector shuffle */ +/// Selects 8 float values from the 256-bit operands of [8 x float], as +/// specified by the immediate value operand. +/// +/// The four selected elements in each operand are copied to the destination +/// according to the bits specified in the immediate operand. The selected +/// elements from the first 256-bit operand are copied to bits [63:0] and +/// bits [191:128] of the destination, and the selected elements from the +/// second 256-bit operand are copied to bits [127:64] and bits [255:192] of +/// the destination. For example, if bits [7:0] of the immediate operand +/// contain a value of 0xFF, the 256-bit destination vector would contain the +/// following values: b[7], b[7], a[7], a[7], b[3], b[3], a[3], a[3]. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_shuffle_ps(__m256 a, __m256 b, const int mask); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPS instruction. +/// +/// \param a +/// A 256-bit vector of [8 x float]. The four selected elements in this +/// operand are copied to bits [63:0] and bits [191:128] in the destination, +/// according to the bits specified in the immediate operand. +/// \param b +/// A 256-bit vector of [8 x float]. The four selected elements in this +/// operand are copied to bits [127:64] and bits [255:192] in the +/// destination, according to the bits specified in the immediate operand. +/// \param mask +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from \a a and \a b \n. +/// Bits [3:0] specify the values copied from operand \a a. \n +/// Bits [7:4] specify the values copied from operand \a b. 
\n +/// The destinations within the 256-bit destination are assigned values as +/// follows, according to the bit value assignments described below: \n +/// Bits [1:0] are used to assign values to bits [31:0] and [159:128] in the +/// destination. \n +/// Bits [3:2] are used to assign values to bits [63:32] and [191:160] in the +/// destination. \n +/// Bits [5:4] are used to assign values to bits [95:64] and [223:192] in the +/// destination. \n +/// Bits [7:6] are used to assign values to bits [127:96] and [255:224] in +/// the destination. \n +/// Bit value assignments: \n +/// 00: Bits [31:0] and [159:128] are copied from the selected operand. \n +/// 01: Bits [63:32] and [191:160] are copied from the selected operand. \n +/// 10: Bits [95:64] and [223:192] are copied from the selected operand. \n +/// 11: Bits [127:96] and [255:224] are copied from the selected operand. \n +/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro. +/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form +/// [b6, b4, b2, b0]. +/// \returns A 256-bit vector of [8 x float] containing the shuffled values. +#define _mm256_shuffle_ps(a, b, mask) \ + ((__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (int)(mask))) + +/// Selects four double-precision values from the 256-bit operands of +/// [4 x double], as specified by the immediate value operand. +/// +/// The selected elements from the first 256-bit operand are copied to bits +/// [63:0] and bits [191:128] in the destination, and the selected elements +/// from the second 256-bit operand are copied to bits [127:64] and bits +/// [255:192] in the destination. For example, if bits [3:0] of the immediate +/// operand contain a value of 0xF, the 256-bit destination vector would +/// contain the following values: b[3], a[3], b[1], a[1]. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_shuffle_pd(__m256d a, __m256d b, const int mask); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double]. +/// \param b +/// A 256-bit vector of [4 x double]. +/// \param mask +/// An immediate value containing 8-bit values specifying which elements to +/// copy from \a a and \a b: \n +/// Bit [0]=0: Bits [63:0] are copied from \a a to bits [63:0] of the +/// destination. \n +/// Bit [0]=1: Bits [127:64] are copied from \a a to bits [63:0] of the +/// destination. \n +/// Bit [1]=0: Bits [63:0] are copied from \a b to bits [127:64] of the +/// destination. \n +/// Bit [1]=1: Bits [127:64] are copied from \a b to bits [127:64] of the +/// destination. \n +/// Bit [2]=0: Bits [191:128] are copied from \a a to bits [191:128] of the +/// destination. \n +/// Bit [2]=1: Bits [255:192] are copied from \a a to bits [191:128] of the +/// destination. \n +/// Bit [3]=0: Bits [191:128] are copied from \a b to bits [255:192] of the +/// destination. \n +/// Bit [3]=1: Bits [255:192] are copied from \a b to bits [255:192] of the +/// destination. +/// \returns A 256-bit vector of [4 x double] containing the shuffled values. 
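+
+/* Example for the shuffle intrinsics in this section, assuming AVX is enabled.
+   _MM_SHUFFLE(s3, s2, s1, s0) packs four 2-bit selectors into the immediate,
+   and the same selector pattern is applied to the low and high 128-bit lanes:
+
+     __m256 a = _mm256_set_ps(7, 6, 5, 4, 3, 2, 1, 0);         // {0 .. 7}
+     __m256 b = _mm256_set_ps(17, 16, 15, 14, 13, 12, 11, 10); // {10 .. 17}
+     __m256 r = _mm256_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 1, 0));
+     // per lane {a0, a1, b0, b1}:  r = {0, 1, 10, 11, 4, 5, 14, 15}
+
+   _mm256_shuffle_pd() below follows the same per-lane pattern with one
+   selector bit per double. */
+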
+#define _mm256_shuffle_pd(a, b, mask) \ + ((__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (int)(mask))) + +/* Compare */ +#define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */ +#define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unordered, signaling) */ +#define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */ +#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */ +#define _CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */ +#define _CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */ +#define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */ +#define _CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */ +#define _CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */ +#define _CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */ +#define _CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */ +#define _CMP_UNORD_S 0x13 /* Unordered (signaling) */ +#define _CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */ +#define _CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */ +#define _CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unordered, non-signaling) */ +#define _CMP_ORD_S 0x17 /* Ordered (signaling) */ +#define _CMP_EQ_US 0x18 /* Equal (unordered, signaling) */ +#define _CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unordered, non-signaling) */ +#define _CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */ +#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */ +#define _CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */ +#define _CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */ +#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */ +#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */ + +/* Below intrinsic defined in emmintrin.h can be used for AVX */ +/// Compares each of the corresponding double-precision values of two +/// 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 128-bit vector of [2 x double] containing the comparison results. +/// \fn __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c) + +/* Below intrinsic defined in xmmintrin.h can be used for AVX */ +/// Compares each of the corresponding values of two 128-bit vectors of +/// [4 x float], using the operation specified by the immediate integer +/// operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +/// \fn __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c) + +/// Compares each of the corresponding double-precision values of two +/// 256-bit vectors of [4 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPD instruction. +/// +/// \param a +/// A 256-bit vector of [4 x double]. +/// \param b +/// A 256-bit vector of [4 x double]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 256-bit vector of [4 x double] containing the comparison results. +#define _mm256_cmp_pd(a, b, c) \ + ((__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), (c))) + +/// Compares each of the corresponding values of two 256-bit vectors of +/// [8 x float], using the operation specified by the immediate integer +/// operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPPS instruction. +/// +/// \param a +/// A 256-bit vector of [8 x float]. +/// \param b +/// A 256-bit vector of [8 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 256-bit vector of [8 x float] containing the comparison results. +#define _mm256_cmp_ps(a, b, c) \ + ((__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), (c))) + +/* Below intrinsic defined in emmintrin.h can be used for AVX */ +/// Compares each of the corresponding scalar double-precision values of +/// two 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPSD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 128-bit vector of [2 x double] containing the comparison results. +/// \fn __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c) + +/* Below intrinsic defined in xmmintrin.h can be used for AVX */ +/// Compares each of the corresponding scalar values of two 128-bit +/// vectors of [4 x float], using the operation specified by the immediate +/// integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the VCMPSS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. 
+/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// 0x08: Equal (unordered, non-signaling) \n +/// 0x09: Not-greater-than-or-equal (unordered, signaling) \n +/// 0x0A: Not-greater-than (unordered, signaling) \n +/// 0x0B: False (ordered, non-signaling) \n +/// 0x0C: Not-equal (ordered, non-signaling) \n +/// 0x0D: Greater-than-or-equal (ordered, signaling) \n +/// 0x0E: Greater-than (ordered, signaling) \n +/// 0x0F: True (unordered, non-signaling) \n +/// 0x10: Equal (ordered, signaling) \n +/// 0x11: Less-than (ordered, non-signaling) \n +/// 0x12: Less-than-or-equal (ordered, non-signaling) \n +/// 0x13: Unordered (signaling) \n +/// 0x14: Not-equal (unordered, signaling) \n +/// 0x15: Not-less-than (unordered, non-signaling) \n +/// 0x16: Not-less-than-or-equal (unordered, non-signaling) \n +/// 0x17: Ordered (signaling) \n +/// 0x18: Equal (unordered, signaling) \n +/// 0x19: Not-greater-than-or-equal (unordered, non-signaling) \n +/// 0x1A: Not-greater-than (unordered, non-signaling) \n +/// 0x1B: False (ordered, signaling) \n +/// 0x1C: Not-equal (ordered, signaling) \n +/// 0x1D: Greater-than-or-equal (ordered, non-signaling) \n +/// 0x1E: Greater-than (ordered, non-signaling) \n +/// 0x1F: True (unordered, signaling) +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +/// \fn __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c) + +/// Takes a [8 x i32] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// \code +/// int _mm256_extract_epi32(__m256i X, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param X +/// A 256-bit vector of [8 x i32]. +/// \param N +/// An immediate integer operand with bits [2:0] determining which vector +/// element is extracted and returned. +/// \returns A 32-bit integer containing the extracted 32 bits of extended +/// packed data. +#define _mm256_extract_epi32(X, N) \ + ((int)__builtin_ia32_vec_ext_v8si((__v8si)(__m256i)(X), (int)(N))) + +/// Takes a [16 x i16] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// \code +/// int _mm256_extract_epi16(__m256i X, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param X +/// A 256-bit integer vector of [16 x i16]. +/// \param N +/// An immediate integer operand with bits [3:0] determining which vector +/// element is extracted and returned. +/// \returns A 32-bit integer containing the extracted 16 bits of zero extended +/// packed data. +#define _mm256_extract_epi16(X, N) \ + ((int)(unsigned short)__builtin_ia32_vec_ext_v16hi((__v16hi)(__m256i)(X), \ + (int)(N))) + +/// Takes a [32 x i8] vector and returns the vector element value +/// indexed by the immediate constant operand. 
+/// +/// \headerfile +/// +/// \code +/// int _mm256_extract_epi8(__m256i X, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param X +/// A 256-bit integer vector of [32 x i8]. +/// \param N +/// An immediate integer operand with bits [4:0] determining which vector +/// element is extracted and returned. +/// \returns A 32-bit integer containing the extracted 8 bits of zero extended +/// packed data. +#define _mm256_extract_epi8(X, N) \ + ((int)(unsigned char)__builtin_ia32_vec_ext_v32qi((__v32qi)(__m256i)(X), \ + (int)(N))) + +#ifdef __x86_64__ +/// Takes a [4 x i64] vector and returns the vector element value +/// indexed by the immediate constant operand. +/// +/// \headerfile +/// +/// \code +/// long long _mm256_extract_epi64(__m256i X, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128+COMPOSITE +/// instruction. +/// +/// \param X +/// A 256-bit integer vector of [4 x i64]. +/// \param N +/// An immediate integer operand with bits [1:0] determining which vector +/// element is extracted and returned. +/// \returns A 64-bit integer containing the extracted 64 bits of extended +/// packed data. +#define _mm256_extract_epi64(X, N) \ + ((long long)__builtin_ia32_vec_ext_v4di((__v4di)(__m256i)(X), (int)(N))) +#endif + +/// Takes a [8 x i32] vector and replaces the vector element value +/// indexed by the immediate constant operand by a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_insert_epi32(__m256i X, int I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param X +/// A vector of [8 x i32] to be used by the insert operation. +/// \param I +/// An integer value. The replacement value for the insert operation. +/// \param N +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a X, after replacing its element indexed by +/// \a N with \a I. +#define _mm256_insert_epi32(X, I, N) \ + ((__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \ + (int)(I), (int)(N))) + + +/// Takes a [16 x i16] vector and replaces the vector element value +/// indexed by the immediate constant operand with a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_insert_epi16(__m256i X, int I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param X +/// A vector of [16 x i16] to be used by the insert operation. +/// \param I +/// An i16 integer value. The replacement value for the insert operation. +/// \param N +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a X, after replacing its element indexed by +/// \a N with \a I. +#define _mm256_insert_epi16(X, I, N) \ + ((__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \ + (int)(I), (int)(N))) + +/// Takes a [32 x i8] vector and replaces the vector element value +/// indexed by the immediate constant operand with a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_insert_epi8(__m256i X, int I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param X +/// A vector of [32 x i8] to be used by the insert operation. 
+/// \param I +/// An i8 integer value. The replacement value for the insert operation. +/// \param N +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a X, after replacing its element indexed by +/// \a N with \a I. +#define _mm256_insert_epi8(X, I, N) \ + ((__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \ + (int)(I), (int)(N))) + +#ifdef __x86_64__ +/// Takes a [4 x i64] vector and replaces the vector element value +/// indexed by the immediate constant operand with a new value. Returns the +/// modified vector. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_insert_epi64(__m256i X, int I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128+COMPOSITE +/// instruction. +/// +/// \param X +/// A vector of [4 x i64] to be used by the insert operation. +/// \param I +/// A 64-bit integer value. The replacement value for the insert operation. +/// \param N +/// An immediate integer specifying the index of the vector element to be +/// replaced. +/// \returns A copy of vector \a X, after replacing its element indexed by +/// \a N with \a I. +#define _mm256_insert_epi64(X, I, N) \ + ((__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \ + (long long)(I), (int)(N))) +#endif + +/* Conversion */ +/// Converts a vector of [4 x i32] into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PD instruction. +/// +/// \param __a +/// A 128-bit integer vector of [4 x i32]. +/// \returns A 256-bit vector of [4 x double] containing the converted values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_cvtepi32_pd(__m128i __a) +{ + return (__m256d)__builtin_convertvector((__v4si)__a, __v4df); +} + +/// Converts a vector of [8 x i32] into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PS instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit vector of [8 x float] containing the converted values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_cvtepi32_ps(__m256i __a) +{ + return (__m256)__builtin_convertvector((__v8si)__a, __v8sf); +} + +/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2PS instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 128-bit vector of [4 x float] containing the converted values. +static __inline __m128 __DEFAULT_FN_ATTRS +_mm256_cvtpd_ps(__m256d __a) +{ + return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) __a); +} + +/// Converts a vector of [8 x float] into a vector of [8 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit integer vector containing the converted values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_cvtps_epi32(__m256 __a) +{ + return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a); +} + +/// Converts a 128-bit vector of [4 x float] into a 256-bit vector of [4 +/// x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2PD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. 
+/// \returns A 256-bit vector of [4 x double] containing the converted values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_cvtps_pd(__m128 __a) +{ + return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df); +} + +/// Converts a 256-bit vector of [4 x double] into four signed truncated +/// (rounded toward zero) 32-bit integers returned in a 128-bit vector of +/// [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPD2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 128-bit integer vector containing the converted values. +static __inline __m128i __DEFAULT_FN_ATTRS +_mm256_cvttpd_epi32(__m256d __a) +{ + return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a); +} + +/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of +/// [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 128-bit integer vector containing the converted values. +static __inline __m128i __DEFAULT_FN_ATTRS +_mm256_cvtpd_epi32(__m256d __a) +{ + return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a); +} + +/// Converts a vector of [8 x float] into eight signed truncated (rounded +/// toward zero) 32-bit integers returned in a vector of [8 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPS2DQ instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 256-bit integer vector containing the converted values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_cvttps_epi32(__m256 __a) +{ + return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a); +} + +/// Returns the first element of the input vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \returns A 64 bit double containing the first element of the input vector. +static __inline double __DEFAULT_FN_ATTRS +_mm256_cvtsd_f64(__m256d __a) +{ + return __a[0]; +} + +/// Returns the first element of the input vector of [8 x i32]. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x i32]. +/// \returns A 32 bit integer containing the first element of the input vector. +static __inline int __DEFAULT_FN_ATTRS +_mm256_cvtsi256_si32(__m256i __a) +{ + __v8si __b = (__v8si)__a; + return __b[0]; +} + +/// Returns the first element of the input vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \returns A 32 bit float containing the first element of the input vector. 
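Editor's note: a minimal usage sketch of the comparison and conversion intrinsics documented above, not part of the patched header. It assumes a toolchain whose immintrin.h provides the _CMP_* predicate constants and an AVX-capable target (e.g. built with -mavx); the contrast shown is rounding (_mm256_cvtpd_epi32) versus truncation (_mm256_cvttpd_epi32).

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256d a = _mm256_setr_pd(1.5, -2.5, 3.5, -4.5);
  __m256d b = _mm256_set1_pd(0.0);

  /* Lane-wise a < b (ordered, non-signaling); each true lane is all ones. */
  __m256d lt = _mm256_cmp_pd(a, b, _CMP_LT_OQ);
  printf("lt mask bits: %x\n", _mm256_movemask_pd(lt));

  /* Round-to-nearest-even versus truncate-toward-zero conversion to i32. */
  __m128i rounded   = _mm256_cvtpd_epi32(a);  /* 2, -2, 4, -4 */
  __m128i truncated = _mm256_cvttpd_epi32(a); /* 1, -2, 3, -4 */
  int r[4], t[4];
  _mm_storeu_si128((__m128i *)r, rounded);
  _mm_storeu_si128((__m128i *)t, truncated);
  printf("rounded:   %d %d %d %d\n", r[0], r[1], r[2], r[3]);
  printf("truncated: %d %d %d %d\n", t[0], t[1], t[2], t[3]);
  return 0;
}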
+static __inline float __DEFAULT_FN_ATTRS +_mm256_cvtss_f32(__m256 __a) +{ + return __a[0]; +} + +/* Vector replicate */ +/// Moves and duplicates odd-indexed values from a 256-bit vector of +/// [8 x float] to float values in a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSHDUP instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [255:224] of \a __a are written to bits [255:224] and [223:192] of +/// the return value. \n +/// Bits [191:160] of \a __a are written to bits [191:160] and [159:128] of +/// the return value. \n +/// Bits [127:96] of \a __a are written to bits [127:96] and [95:64] of the +/// return value. \n +/// Bits [63:32] of \a __a are written to bits [63:32] and [31:0] of the +/// return value. +/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated +/// values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_movehdup_ps(__m256 __a) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7); +} + +/// Moves and duplicates even-indexed values from a 256-bit vector of +/// [8 x float] to float values in a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSLDUP instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [223:192] of \a __a are written to bits [255:224] and [223:192] of +/// the return value. \n +/// Bits [159:128] of \a __a are written to bits [191:160] and [159:128] of +/// the return value. \n +/// Bits [95:64] of \a __a are written to bits [127:96] and [95:64] of the +/// return value. \n +/// Bits [31:0] of \a __a are written to bits [63:32] and [31:0] of the +/// return value. +/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated +/// values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_moveldup_ps(__m256 __a) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6); +} + +/// Moves and duplicates double-precision floating point values from a +/// 256-bit vector of [4 x double] to double-precision values in a 256-bit +/// vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. \n +/// Bits [63:0] of \a __a are written to bits [127:64] and [63:0] of the +/// return value. \n +/// Bits [191:128] of \a __a are written to bits [255:192] and [191:128] of +/// the return value. +/// \returns A 256-bit vector of [4 x double] containing the moved and +/// duplicated values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_movedup_pd(__m256d __a) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2); +} + +/* Unpack and Interleave */ +/// Unpacks the odd-indexed vector elements from two 256-bit vectors of +/// [4 x double] and interleaves them into a 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPD instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [127:64] are written to bits [63:0] of the return value. \n +/// Bits [255:192] are written to bits [191:128] of the return value. \n +/// \param __b +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [127:64] are written to bits [127:64] of the return value. \n +/// Bits [255:192] are written to bits [255:192] of the return value. 
\n +/// \returns A 256-bit vector of [4 x double] containing the interleaved values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_unpackhi_pd(__m256d __a, __m256d __b) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2); +} + +/// Unpacks the even-indexed vector elements from two 256-bit vectors of +/// [4 x double] and interleaves them into a 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [63:0] are written to bits [63:0] of the return value. \n +/// Bits [191:128] are written to bits [191:128] of the return value. +/// \param __b +/// A 256-bit floating-point vector of [4 x double]. \n +/// Bits [63:0] are written to bits [127:64] of the return value. \n +/// Bits [191:128] are written to bits [255:192] of the return value. \n +/// \returns A 256-bit vector of [4 x double] containing the interleaved values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_unpacklo_pd(__m256d __a, __m256d __b) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2); +} + +/// Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the +/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit +/// vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [95:64] are written to bits [31:0] of the return value. \n +/// Bits [127:96] are written to bits [95:64] of the return value. \n +/// Bits [223:192] are written to bits [159:128] of the return value. \n +/// Bits [255:224] are written to bits [223:192] of the return value. +/// \param __b +/// A 256-bit vector of [8 x float]. \n +/// Bits [95:64] are written to bits [63:32] of the return value. \n +/// Bits [127:96] are written to bits [127:96] of the return value. \n +/// Bits [223:192] are written to bits [191:160] of the return value. \n +/// Bits [255:224] are written to bits [255:224] of the return value. +/// \returns A 256-bit vector of [8 x float] containing the interleaved values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_unpackhi_ps(__m256 __a, __m256 __b) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1); +} + +/// Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the +/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit +/// vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. \n +/// Bits [31:0] are written to bits [31:0] of the return value. \n +/// Bits [63:32] are written to bits [95:64] of the return value. \n +/// Bits [159:128] are written to bits [159:128] of the return value. \n +/// Bits [191:160] are written to bits [223:192] of the return value. +/// \param __b +/// A 256-bit vector of [8 x float]. \n +/// Bits [31:0] are written to bits [63:32] of the return value. \n +/// Bits [63:32] are written to bits [127:96] of the return value. \n +/// Bits [159:128] are written to bits [191:160] of the return value. \n +/// Bits [191:160] are written to bits [255:224] of the return value. +/// \returns A 256-bit vector of [8 x float] containing the interleaved values. 
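Editor's note: a small sketch of the unpack/interleave behaviour described above, not part of the patched header. Assuming immintrin.h and an AVX target, the unpack intrinsics interleave the even- and odd-indexed lanes within each 128-bit half rather than across the full 256-bit vector.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256d a = _mm256_setr_pd(0.0, 1.0, 2.0, 3.0);
  __m256d b = _mm256_setr_pd(10.0, 11.0, 12.0, 13.0);
  double lo[4], hi[4];
  _mm256_storeu_pd(lo, _mm256_unpacklo_pd(a, b)); /* 0, 10, 2, 12 */
  _mm256_storeu_pd(hi, _mm256_unpackhi_pd(a, b)); /* 1, 11, 3, 13 */
  printf("lo: %g %g %g %g\n", lo[0], lo[1], lo[2], lo[3]);
  printf("hi: %g %g %g %g\n", hi[0], hi[1], hi[2], hi[3]);
  return 0;
}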
+static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_unpacklo_ps(__m256 __a, __m256 __b) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1); +} + +/* Bit Test */ +/// Given two 128-bit floating-point vectors of [2 x double], perform an +/// element-by-element comparison of the double-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns the ZF flag in the EFLAGS register. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testz_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b); +} + +/// Given two 128-bit floating-point vectors of [2 x double], perform an +/// element-by-element comparison of the double-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns the CF flag in the EFLAGS register. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testc_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b); +} + +/// Given two 128-bit floating-point vectors of [2 x double], perform an +/// element-by-element comparison of the double-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. 
+/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testnzc_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b); +} + +/// Given two 128-bit floating-point vectors of [4 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns the ZF flag. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testz_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b); +} + +/// Given two 128-bit floating-point vectors of [4 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns the CF flag. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testc_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b); +} + +/// Given two 128-bit floating-point vectors of [4 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. 
+/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS128 +_mm_testnzc_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b); +} + +/// Given two 256-bit floating-point vectors of [4 x double], perform an +/// element-by-element comparison of the double-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \returns the ZF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testz_pd(__m256d __a, __m256d __b) +{ + return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b); +} + +/// Given two 256-bit floating-point vectors of [4 x double], perform an +/// element-by-element comparison of the double-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. +/// \returns the CF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testc_pd(__m256d __a, __m256d __b) +{ + return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b); +} + +/// Given two 256-bit floating-point vectors of [4 x double], perform an +/// element-by-element comparison of the double-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of double-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of double-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPD instruction. +/// +/// \param __a +/// A 256-bit vector of [4 x double]. +/// \param __b +/// A 256-bit vector of [4 x double]. 
+/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testnzc_pd(__m256d __a, __m256d __b) +{ + return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b); +} + +/// Given two 256-bit floating-point vectors of [8 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \returns the ZF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testz_ps(__m256 __a, __m256 __b) +{ + return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b); +} + +/// Given two 256-bit floating-point vectors of [8 x float], perform an +/// element-by-element comparison of the single-precision element in the +/// first source vector and the corresponding element in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. +/// \returns the CF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testc_ps(__m256 __a, __m256 __b) +{ + return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b); +} + +/// Given two 256-bit floating-point vectors of [8 x float], perform an +/// element-by-element comparison of the single-precision elements in the +/// first source vector and the corresponding elements in the second source +/// vector. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of single-precision elements where the +/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the +/// ZF flag is set to 1. \n +/// If there is at least one pair of single-precision elements where the +/// sign-bit of the first element is 0 and the sign-bit of the second element +/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VTESTPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float]. +/// \param __b +/// A 256-bit vector of [8 x float]. 
+/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testnzc_ps(__m256 __a, __m256 __b) +{ + return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b); +} + +/// Given two 256-bit integer vectors, perform a bit-by-bit comparison +/// of the two source vectors. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of bits where both bits are 1, the ZF flag +/// is set to 0. Otherwise the ZF flag is set to 1. \n +/// If there is at least one pair of bits where the bit from the first source +/// vector is 0 and the bit from the second source vector is 1, the CF flag +/// is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the ZF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns the ZF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testz_si256(__m256i __a, __m256i __b) +{ + return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b); +} + +/// Given two 256-bit integer vectors, perform a bit-by-bit comparison +/// of the two source vectors. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of bits where both bits are 1, the ZF flag +/// is set to 0. Otherwise the ZF flag is set to 1. \n +/// If there is at least one pair of bits where the bit from the first source +/// vector is 0 and the bit from the second source vector is 1, the CF flag +/// is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns the value of the CF flag. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns the CF flag. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testc_si256(__m256i __a, __m256i __b) +{ + return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b); +} + +/// Given two 256-bit integer vectors, perform a bit-by-bit comparison +/// of the two source vectors. +/// +/// The EFLAGS register is updated as follows: \n +/// If there is at least one pair of bits where both bits are 1, the ZF flag +/// is set to 0. Otherwise the ZF flag is set to 1. \n +/// If there is at least one pair of bits where the bit from the first source +/// vector is 0 and the bit from the second source vector is 1, the CF flag +/// is set to 0. Otherwise the CF flag is set to 1. \n +/// This intrinsic returns 1 if both the ZF and CF flags are set to 0, +/// otherwise it returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \param __b +/// A 256-bit integer vector. +/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0. +static __inline int __DEFAULT_FN_ATTRS +_mm256_testnzc_si256(__m256i __a, __m256i __b) +{ + return __builtin_ia32_ptestnzc256((__v4di)__a, (__v4di)__b); +} + +/* Vector extract sign mask */ +/// Extracts the sign bits of double-precision floating point elements +/// in a 256-bit vector of [4 x double] and writes them to the lower order +/// bits of the return value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPD instruction. 
+/// +/// \param __a +/// A 256-bit vector of [4 x double] containing the double-precision +/// floating point values with sign bits to be extracted. +/// \returns The sign bits from the operand, written to bits [3:0]. +static __inline int __DEFAULT_FN_ATTRS +_mm256_movemask_pd(__m256d __a) +{ + return __builtin_ia32_movmskpd256((__v4df)__a); +} + +/// Extracts the sign bits of single-precision floating point elements +/// in a 256-bit vector of [8 x float] and writes them to the lower order +/// bits of the return value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPS instruction. +/// +/// \param __a +/// A 256-bit vector of [8 x float] containing the single-precision floating +/// point values with sign bits to be extracted. +/// \returns The sign bits from the operand, written to bits [7:0]. +static __inline int __DEFAULT_FN_ATTRS +_mm256_movemask_ps(__m256 __a) +{ + return __builtin_ia32_movmskps256((__v8sf)__a); +} + +/* Vector __zero */ +/// Zeroes the contents of all XMM or YMM registers. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VZEROALL instruction. +static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx"))) +_mm256_zeroall(void) +{ + __builtin_ia32_vzeroall(); +} + +/// Zeroes the upper 128 bits (bits 255:128) of all YMM registers. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VZEROUPPER instruction. +static __inline void __attribute__((__always_inline__, __nodebug__, __target__("avx"))) +_mm256_zeroupper(void) +{ + __builtin_ia32_vzeroupper(); +} + +/* Vector load with broadcast */ +/// Loads a scalar single-precision floating point value from the +/// specified address pointed to by \a __a and broadcasts it to the elements +/// of a [4 x float] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSS instruction. +/// +/// \param __a +/// The single-precision floating point value to be broadcast. +/// \returns A 128-bit vector of [4 x float] whose 32-bit elements are set +/// equal to the broadcast value. +static __inline __m128 __DEFAULT_FN_ATTRS128 +_mm_broadcast_ss(float const *__a) +{ + struct __mm_broadcast_ss_struct { + float __f; + } __attribute__((__packed__, __may_alias__)); + float __f = ((const struct __mm_broadcast_ss_struct*)__a)->__f; + return __extension__ (__m128){ __f, __f, __f, __f }; +} + +/// Loads a scalar double-precision floating point value from the +/// specified address pointed to by \a __a and broadcasts it to the elements +/// of a [4 x double] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSD instruction. +/// +/// \param __a +/// The double-precision floating point value to be broadcast. +/// \returns A 256-bit vector of [4 x double] whose 64-bit elements are set +/// equal to the broadcast value. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_broadcast_sd(double const *__a) +{ + struct __mm256_broadcast_sd_struct { + double __d; + } __attribute__((__packed__, __may_alias__)); + double __d = ((const struct __mm256_broadcast_sd_struct*)__a)->__d; + return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d }; +} + +/// Loads a scalar single-precision floating point value from the +/// specified address pointed to by \a __a and broadcasts it to the elements +/// of a [8 x float] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSS instruction. +/// +/// \param __a +/// The single-precision floating point value to be broadcast. 
+/// \returns A 256-bit vector of [8 x float] whose 32-bit elements are set +/// equal to the broadcast value. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_broadcast_ss(float const *__a) +{ + struct __mm256_broadcast_ss_struct { + float __f; + } __attribute__((__packed__, __may_alias__)); + float __f = ((const struct __mm256_broadcast_ss_struct*)__a)->__f; + return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f }; +} + +/// Loads the data from a 128-bit vector of [2 x double] from the +/// specified address pointed to by \a __a and broadcasts it to 128-bit +/// elements in a 256-bit vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTF128 instruction. +/// +/// \param __a +/// The 128-bit vector of [2 x double] to be broadcast. +/// \returns A 256-bit vector of [4 x double] whose 128-bit elements are set +/// equal to the broadcast value. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_broadcast_pd(__m128d const *__a) +{ + __m128d __b = _mm_loadu_pd((const double *)__a); + return (__m256d)__builtin_shufflevector((__v2df)__b, (__v2df)__b, + 0, 1, 0, 1); +} + +/// Loads the data from a 128-bit vector of [4 x float] from the +/// specified address pointed to by \a __a and broadcasts it to 128-bit +/// elements in a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTF128 instruction. +/// +/// \param __a +/// The 128-bit vector of [4 x float] to be broadcast. +/// \returns A 256-bit vector of [8 x float] whose 128-bit elements are set +/// equal to the broadcast value. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_broadcast_ps(__m128 const *__a) +{ + __m128 __b = _mm_loadu_ps((const float *)__a); + return (__m256)__builtin_shufflevector((__v4sf)__b, (__v4sf)__b, + 0, 1, 2, 3, 0, 1, 2, 3); +} + +/* SIMD load ops */ +/// Loads 4 double-precision floating point values from a 32-byte aligned +/// memory location pointed to by \a __p into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location containing +/// double-precision floating point values. +/// \returns A 256-bit vector of [4 x double] containing the moved values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_load_pd(double const *__p) +{ + return *(const __m256d *)__p; +} + +/// Loads 8 single-precision floating point values from a 32-byte aligned +/// memory location pointed to by \a __p into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location containing float values. +/// \returns A 256-bit vector of [8 x float] containing the moved values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_load_ps(float const *__p) +{ + return *(const __m256 *)__p; +} + +/// Loads 4 double-precision floating point values from an unaligned +/// memory location pointed to by \a __p into a vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD instruction. +/// +/// \param __p +/// A pointer to a memory location containing double-precision floating +/// point values. +/// \returns A 256-bit vector of [4 x double] containing the moved values. 
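Editor's note: a brief sketch of the broadcast and load forms above, not part of the patched header. Assuming immintrin.h and an AVX target, _mm256_broadcast_sd splats one double to all four lanes, _mm256_load_pd requires a 32-byte-aligned pointer, and the storeu form tolerates any alignment.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  double scale = 2.5;
  __m256d k = _mm256_broadcast_sd(&scale);        /* {2.5, 2.5, 2.5, 2.5} */

  double buf[4] __attribute__((aligned(32))) = {1.0, 2.0, 3.0, 4.0};
  __m256d v = _mm256_load_pd(buf);                /* 32-byte aligned load */

  double out[4];
  _mm256_storeu_pd(out, _mm256_mul_pd(v, k));     /* unaligned store */
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}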
+static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_loadu_pd(double const *__p) +{ + struct __loadu_pd { + __m256d_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_pd*)__p)->__v; +} + +/// Loads 8 single-precision floating point values from an unaligned +/// memory location pointed to by \a __p into a vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location containing single-precision floating +/// point values. +/// \returns A 256-bit vector of [8 x float] containing the moved values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_loadu_ps(float const *__p) +{ + struct __loadu_ps { + __m256_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ps*)__p)->__v; +} + +/// Loads 256 bits of integer data from a 32-byte aligned memory +/// location pointed to by \a __p into elements of a 256-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQA instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a 256-bit integer vector containing integer +/// values. +/// \returns A 256-bit integer vector containing the moved values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_load_si256(__m256i const *__p) +{ + return *__p; +} + +/// Loads 256 bits of integer data from an unaligned memory location +/// pointed to by \a __p into a 256-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU instruction. +/// +/// \param __p +/// A pointer to a 256-bit integer vector containing integer values. +/// \returns A 256-bit integer vector containing the moved values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_loadu_si256(__m256i_u const *__p) +{ + struct __loadu_si256 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_si256*)__p)->__v; +} + +/// Loads 256 bits of integer data from an unaligned memory location +/// pointed to by \a __p into a 256-bit integer vector. This intrinsic may +/// perform better than \c _mm256_loadu_si256 when the data crosses a cache +/// line boundary. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VLDDQU instruction. +/// +/// \param __p +/// A pointer to a 256-bit integer vector containing integer values. +/// \returns A 256-bit integer vector containing the moved values. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_lddqu_si256(__m256i_u const *__p) +{ + return (__m256i)__builtin_ia32_lddqu256((char const *)__p); +} + +/* SIMD store ops */ +/// Stores double-precision floating point values from a 256-bit vector +/// of [4 x double] to a 32-byte aligned memory location pointed to by +/// \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location that will receive the +/// double-precision floaing point values. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_store_pd(double *__p, __m256d __a) +{ + *(__m256d *)__p = __a; +} + +/// Stores single-precision floating point values from a 256-bit vector +/// of [8 x float] to a 32-byte aligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS instruction. 
+/// +/// \param __p +/// A 32-byte aligned pointer to a memory location that will receive the +/// float values. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_store_ps(float *__p, __m256 __a) +{ + *(__m256 *)__p = __a; +} + +/// Stores double-precision floating point values from a 256-bit vector +/// of [4 x double] to an unaligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the double-precision +/// floating point values. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu_pd(double *__p, __m256d __a) +{ + struct __storeu_pd { + __m256d_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd*)__p)->__v = __a; +} + +/// Stores single-precision floating point values from a 256-bit vector +/// of [8 x float] to an unaligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu_ps(float *__p, __m256 __a) +{ + struct __storeu_ps { + __m256_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__p)->__v = __a; +} + +/// Stores integer values from a 256-bit integer vector to a 32-byte +/// aligned memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQA instruction. +/// +/// \param __p +/// A 32-byte aligned pointer to a memory location that will receive the +/// integer values. +/// \param __a +/// A 256-bit integer vector containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_store_si256(__m256i *__p, __m256i __a) +{ + *__p = __a; +} + +/// Stores integer values from a 256-bit integer vector to an unaligned +/// memory location pointed to by \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the integer values. +/// \param __a +/// A 256-bit integer vector containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu_si256(__m256i_u *__p, __m256i __a) +{ + struct __storeu_si256 { + __m256i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si256*)__p)->__v = __a; +} + +/* Conditional load ops */ +/// Conditionally loads double-precision floating point elements from a +/// memory location pointed to by \a __p into a 128-bit vector of +/// [2 x double], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the double-precision +/// floating point values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each data element represents the mask bits. If a mask bit is zero, the +/// corresponding value in the memory location is not loaded and the +/// corresponding field in the return value is set to zero. 
+/// \returns A 128-bit vector of [2 x double] containing the loaded values. +static __inline __m128d __DEFAULT_FN_ATTRS128 +_mm_maskload_pd(double const *__p, __m128i __m) +{ + return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m); +} + +/// Conditionally loads double-precision floating point elements from a +/// memory location pointed to by \a __p into a 256-bit vector of +/// [4 x double], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the double-precision +/// floating point values. +/// \param __m +/// A 256-bit integer vector of [4 x quadword] containing the mask. The most +/// significant bit of each quadword element represents the mask bits. If a +/// mask bit is zero, the corresponding value in the memory location is not +/// loaded and the corresponding field in the return value is set to zero. +/// \returns A 256-bit vector of [4 x double] containing the loaded values. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_maskload_pd(double const *__p, __m256i __m) +{ + return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)__p, + (__v4di)__m); +} + +/// Conditionally loads single-precision floating point elements from a +/// memory location pointed to by \a __p into a 128-bit vector of +/// [4 x float], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the single-precision +/// floating point values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each data element represents the mask bits. If a mask bit is zero, the +/// corresponding value in the memory location is not loaded and the +/// corresponding field in the return value is set to zero. +/// \returns A 128-bit vector of [4 x float] containing the loaded values. +static __inline __m128 __DEFAULT_FN_ATTRS128 +_mm_maskload_ps(float const *__p, __m128i __m) +{ + return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m); +} + +/// Conditionally loads single-precision floating point elements from a +/// memory location pointed to by \a __p into a 256-bit vector of +/// [8 x float], depending on the mask bits associated with each data +/// element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that contains the single-precision +/// floating point values. +/// \param __m +/// A 256-bit integer vector of [8 x dword] containing the mask. The most +/// significant bit of each dword element represents the mask bits. If a mask +/// bit is zero, the corresponding value in the memory location is not loaded +/// and the corresponding field in the return value is set to zero. +/// \returns A 256-bit vector of [8 x float] containing the loaded values. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_maskload_ps(float const *__p, __m256i __m) +{ + return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)__p, (__v8si)__m); +} + +/* Conditional store ops */ +/// Moves single-precision floating point values from a 256-bit vector +/// of [8 x float] to a memory location pointed to by \a __p, according to +/// the specified mask. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 256-bit integer vector of [8 x dword] containing the mask. The most +/// significant bit of each dword element in the mask vector represents the +/// mask bits. If a mask bit is zero, the corresponding value from vector +/// \a __a is not stored and the corresponding field in the memory location +/// pointed to by \a __p is not changed. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be stored. +static __inline void __DEFAULT_FN_ATTRS +_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a) +{ + __builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a); +} + +/// Moves double-precision values from a 128-bit vector of [2 x double] +/// to a memory location pointed to by \a __p, according to the specified +/// mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each field in the mask vector represents the mask bits. If a mask bit is +/// zero, the corresponding value from vector \a __a is not stored and the +/// corresponding field in the memory location pointed to by \a __p is not +/// changed. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be stored. +static __inline void __DEFAULT_FN_ATTRS128 +_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a) +{ + __builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a); +} + +/// Moves double-precision values from a 256-bit vector of [4 x double] +/// to a memory location pointed to by \a __p, according to the specified +/// mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPD instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 256-bit integer vector of [4 x quadword] containing the mask. The most +/// significant bit of each quadword element in the mask vector represents +/// the mask bits. If a mask bit is zero, the corresponding value from vector +/// __a is not stored and the corresponding field in the memory location +/// pointed to by \a __p is not changed. +/// \param __a +/// A 256-bit vector of [4 x double] containing the values to be stored. +static __inline void __DEFAULT_FN_ATTRS +_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a) +{ + __builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a); +} + +/// Moves single-precision floating point values from a 128-bit vector +/// of [4 x float] to a memory location pointed to by \a __p, according to +/// the specified mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __m +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each field in the mask vector represents the mask bits. If a mask bit is +/// zero, the corresponding value from vector __a is not stored and the +/// corresponding field in the memory location pointed to by \a __p is not +/// changed. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. 
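As a usage sketch (not part of this header), the masked load/store intrinsics above are typically used to process the tail of an array without reading or writing past its end: lanes whose mask sign bit is clear are neither loaded nor stored and cannot fault. A minimal example under that assumption; the helper name copy_tail_pd and the tail length n are illustrative only, and the code must be compiled with AVX enabled (e.g. -mavx).

#include <immintrin.h>

/* Copy n (0 <= n <= 4) doubles from src to dst with masked memory ops.
   Lanes whose mask sign bit is clear are not touched in memory. */
static void copy_tail_pd(double *dst, const double *src, int n)
{
  __m256i mask = _mm256_set_epi64x(n > 3 ? -1 : 0,   /* lane 3, bits [255:192] */
                                   n > 2 ? -1 : 0,   /* lane 2, bits [191:128] */
                                   n > 1 ? -1 : 0,   /* lane 1, bits [127:64]  */
                                   n > 0 ? -1 : 0);  /* lane 0, bits [63:0]    */
  __m256d v = _mm256_maskload_pd(src, mask);
  _mm256_maskstore_pd(dst, mask, v);
}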
+static __inline void __DEFAULT_FN_ATTRS128 +_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a) +{ + __builtin_ia32_maskstoreps((__v4sf *)__p, (__v4si)__m, (__v4sf)__a); +} + +/* Cacheability support ops */ +/// Moves integer data from a 256-bit integer vector to a 32-byte +/// aligned memory location. To minimize caching, the data is flagged as +/// non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTDQ instruction. +/// +/// \param __a +/// A pointer to a 32-byte aligned memory location that will receive the +/// integer values. +/// \param __b +/// A 256-bit integer vector containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_stream_si256(void *__a, __m256i __b) +{ + typedef __v4di __v4di_aligned __attribute__((aligned(32))); + __builtin_nontemporal_store((__v4di_aligned)__b, (__v4di_aligned*)__a); +} + +/// Moves double-precision values from a 256-bit vector of [4 x double] +/// to a 32-byte aligned memory location. To minimize caching, the data is +/// flagged as non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPD instruction. +/// +/// \param __a +/// A pointer to a 32-byte aligned memory location that will receive the +/// double-precision floating-point values. +/// \param __b +/// A 256-bit vector of [4 x double] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_stream_pd(void *__a, __m256d __b) +{ + typedef __v4df __v4df_aligned __attribute__((aligned(32))); + __builtin_nontemporal_store((__v4df_aligned)__b, (__v4df_aligned*)__a); +} + +/// Moves single-precision floating point values from a 256-bit vector +/// of [8 x float] to a 32-byte aligned memory location. To minimize +/// caching, the data is flagged as non-temporal (unlikely to be used again +/// soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS instruction. +/// +/// \param __p +/// A pointer to a 32-byte aligned memory location that will receive the +/// single-precision floating point values. +/// \param __a +/// A 256-bit vector of [8 x float] containing the values to be moved. +static __inline void __DEFAULT_FN_ATTRS +_mm256_stream_ps(void *__p, __m256 __a) +{ + typedef __v8sf __v8sf_aligned __attribute__((aligned(32))); + __builtin_nontemporal_store((__v8sf_aligned)__a, (__v8sf_aligned*)__p); +} + +/* Create vectors */ +/// Create a 256-bit vector of [4 x double] with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 256-bit vector of [4 x double] containing undefined values. +static __inline__ __m256d __DEFAULT_FN_ATTRS +_mm256_undefined_pd(void) +{ + return (__m256d)__builtin_ia32_undef256(); +} + +/// Create a 256-bit vector of [8 x float] with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 256-bit vector of [8 x float] containing undefined values. +static __inline__ __m256 __DEFAULT_FN_ATTRS +_mm256_undefined_ps(void) +{ + return (__m256)__builtin_ia32_undef256(); +} + +/// Create a 256-bit integer vector with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 256-bit integer vector containing undefined values. 
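The streaming stores above bypass the cache hierarchy and are usually paired with a store fence before the data is consumed. A minimal sketch, assuming a hypothetical fill_nt helper, a 32-byte-aligned destination, and a count that is a multiple of 4.

#include <stddef.h>
#include <immintrin.h>

/* Fill a 32-byte-aligned buffer with a constant using non-temporal stores,
   then fence so the streaming stores are ordered before later accesses. */
static void fill_nt(double *dst, double value, size_t count)
{
  __m256d v = _mm256_set1_pd(value);     /* broadcast (see _mm256_set1_pd below) */
  for (size_t i = 0; i < count; i += 4)
    _mm256_stream_pd(dst + i, v);
  _mm_sfence();
}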
+static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm256_undefined_si256(void) +{ + return (__m256i)__builtin_ia32_undef256(); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] +/// initialized with the specified double-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A double-precision floating-point value used to initialize bits [255:192] +/// of the result. +/// \param __b +/// A double-precision floating-point value used to initialize bits [191:128] +/// of the result. +/// \param __c +/// A double-precision floating-point value used to initialize bits [127:64] +/// of the result. +/// \param __d +/// A double-precision floating-point value used to initialize bits [63:0] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_set_pd(double __a, double __b, double __c, double __d) +{ + return __extension__ (__m256d){ __d, __c, __b, __a }; +} + +/// Constructs a 256-bit floating-point vector of [8 x float] initialized +/// with the specified single-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A single-precision floating-point value used to initialize bits [255:224] +/// of the result. +/// \param __b +/// A single-precision floating-point value used to initialize bits [223:192] +/// of the result. +/// \param __c +/// A single-precision floating-point value used to initialize bits [191:160] +/// of the result. +/// \param __d +/// A single-precision floating-point value used to initialize bits [159:128] +/// of the result. +/// \param __e +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \param __f +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __g +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __h +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [8 x float]. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_set_ps(float __a, float __b, float __c, float __d, + float __e, float __f, float __g, float __h) +{ + return __extension__ (__m256){ __h, __g, __f, __e, __d, __c, __b, __a }; +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [255:224] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [223:192] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [191:160] of the result. +/// \param __i3 +/// A 32-bit integral value used to initialize bits [159:128] of the result. +/// \param __i4 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \param __i5 +/// A 32-bit integral value used to initialize bits [95:64] of the result. +/// \param __i6 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i7 +/// A 32-bit integral value used to initialize bits [31:0] of the result. 
+/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, + int __i4, int __i5, int __i6, int __i7) +{ + return __extension__ (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 }; +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 16-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w15 +/// A 16-bit integral value used to initialize bits [255:240] of the result. +/// \param __w14 +/// A 16-bit integral value used to initialize bits [239:224] of the result. +/// \param __w13 +/// A 16-bit integral value used to initialize bits [223:208] of the result. +/// \param __w12 +/// A 16-bit integral value used to initialize bits [207:192] of the result. +/// \param __w11 +/// A 16-bit integral value used to initialize bits [191:176] of the result. +/// \param __w10 +/// A 16-bit integral value used to initialize bits [175:160] of the result. +/// \param __w09 +/// A 16-bit integral value used to initialize bits [159:144] of the result. +/// \param __w08 +/// A 16-bit integral value used to initialize bits [143:128] of the result. +/// \param __w07 +/// A 16-bit integral value used to initialize bits [127:112] of the result. +/// \param __w06 +/// A 16-bit integral value used to initialize bits [111:96] of the result. +/// \param __w05 +/// A 16-bit integral value used to initialize bits [95:80] of the result. +/// \param __w04 +/// A 16-bit integral value used to initialize bits [79:64] of the result. +/// \param __w03 +/// A 16-bit integral value used to initialize bits [63:48] of the result. +/// \param __w02 +/// A 16-bit integral value used to initialize bits [47:32] of the result. +/// \param __w01 +/// A 16-bit integral value used to initialize bits [31:16] of the result. +/// \param __w00 +/// A 16-bit integral value used to initialize bits [15:0] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12, + short __w11, short __w10, short __w09, short __w08, + short __w07, short __w06, short __w05, short __w04, + short __w03, short __w02, short __w01, short __w00) +{ + return __extension__ (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06, + __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 }; +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 8-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b31 +/// An 8-bit integral value used to initialize bits [255:248] of the result. +/// \param __b30 +/// An 8-bit integral value used to initialize bits [247:240] of the result. +/// \param __b29 +/// An 8-bit integral value used to initialize bits [239:232] of the result. +/// \param __b28 +/// An 8-bit integral value used to initialize bits [231:224] of the result. +/// \param __b27 +/// An 8-bit integral value used to initialize bits [223:216] of the result. +/// \param __b26 +/// An 8-bit integral value used to initialize bits [215:208] of the result. +/// \param __b25 +/// An 8-bit integral value used to initialize bits [207:200] of the result. +/// \param __b24 +/// An 8-bit integral value used to initialize bits [199:192] of the result. 
+/// \param __b23 +/// An 8-bit integral value used to initialize bits [191:184] of the result. +/// \param __b22 +/// An 8-bit integral value used to initialize bits [183:176] of the result. +/// \param __b21 +/// An 8-bit integral value used to initialize bits [175:168] of the result. +/// \param __b20 +/// An 8-bit integral value used to initialize bits [167:160] of the result. +/// \param __b19 +/// An 8-bit integral value used to initialize bits [159:152] of the result. +/// \param __b18 +/// An 8-bit integral value used to initialize bits [151:144] of the result. +/// \param __b17 +/// An 8-bit integral value used to initialize bits [143:136] of the result. +/// \param __b16 +/// An 8-bit integral value used to initialize bits [135:128] of the result. +/// \param __b15 +/// An 8-bit integral value used to initialize bits [127:120] of the result. +/// \param __b14 +/// An 8-bit integral value used to initialize bits [119:112] of the result. +/// \param __b13 +/// An 8-bit integral value used to initialize bits [111:104] of the result. +/// \param __b12 +/// An 8-bit integral value used to initialize bits [103:96] of the result. +/// \param __b11 +/// An 8-bit integral value used to initialize bits [95:88] of the result. +/// \param __b10 +/// An 8-bit integral value used to initialize bits [87:80] of the result. +/// \param __b09 +/// An 8-bit integral value used to initialize bits [79:72] of the result. +/// \param __b08 +/// An 8-bit integral value used to initialize bits [71:64] of the result. +/// \param __b07 +/// An 8-bit integral value used to initialize bits [63:56] of the result. +/// \param __b06 +/// An 8-bit integral value used to initialize bits [55:48] of the result. +/// \param __b05 +/// An 8-bit integral value used to initialize bits [47:40] of the result. +/// \param __b04 +/// An 8-bit integral value used to initialize bits [39:32] of the result. +/// \param __b03 +/// An 8-bit integral value used to initialize bits [31:24] of the result. +/// \param __b02 +/// An 8-bit integral value used to initialize bits [23:16] of the result. +/// \param __b01 +/// An 8-bit integral value used to initialize bits [15:8] of the result. +/// \param __b00 +/// An 8-bit integral value used to initialize bits [7:0] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28, + char __b27, char __b26, char __b25, char __b24, + char __b23, char __b22, char __b21, char __b20, + char __b19, char __b18, char __b17, char __b16, + char __b15, char __b14, char __b13, char __b12, + char __b11, char __b10, char __b09, char __b08, + char __b07, char __b06, char __b05, char __b04, + char __b03, char __b02, char __b01, char __b00) +{ + return __extension__ (__m256i)(__v32qi){ + __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07, + __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15, + __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23, + __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31 + }; +} + +/// Constructs a 256-bit integer vector initialized with the specified +/// 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A 64-bit integral value used to initialize bits [255:192] of the result. +/// \param __b +/// A 64-bit integral value used to initialize bits [191:128] of the result. 
+/// \param __c +/// A 64-bit integral value used to initialize bits [127:64] of the result. +/// \param __d +/// A 64-bit integral value used to initialize bits [63:0] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d) +{ + return __extension__ (__m256i)(__v4di){ __d, __c, __b, __a }; +} + +/* Create vectors with elements in reverse order */ +/// Constructs a 256-bit floating-point vector of [4 x double], +/// initialized in reverse order with the specified double-precision +/// floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A double-precision floating-point value used to initialize bits [63:0] +/// of the result. +/// \param __b +/// A double-precision floating-point value used to initialize bits [127:64] +/// of the result. +/// \param __c +/// A double-precision floating-point value used to initialize bits [191:128] +/// of the result. +/// \param __d +/// A double-precision floating-point value used to initialize bits [255:192] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_setr_pd(double __a, double __b, double __c, double __d) +{ + return _mm256_set_pd(__d, __c, __b, __a); +} + +/// Constructs a 256-bit floating-point vector of [8 x float], +/// initialized in reverse order with the specified single-precision +/// float-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __a +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \param __b +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __c +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __d +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \param __e +/// A single-precision floating-point value used to initialize bits [159:128] +/// of the result. +/// \param __f +/// A single-precision floating-point value used to initialize bits [191:160] +/// of the result. +/// \param __g +/// A single-precision floating-point value used to initialize bits [223:192] +/// of the result. +/// \param __h +/// A single-precision floating-point value used to initialize bits [255:224] +/// of the result. +/// \returns An initialized 256-bit floating-point vector of [8 x float]. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_setr_ps(float __a, float __b, float __c, float __d, + float __e, float __f, float __g, float __h) +{ + return _mm256_set_ps(__h, __g, __f, __e, __d, __c, __b, __a); +} + +/// Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [31:0] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [95:64] of the result. 
+/// \param __i3 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \param __i4 +/// A 32-bit integral value used to initialize bits [159:128] of the result. +/// \param __i5 +/// A 32-bit integral value used to initialize bits [191:160] of the result. +/// \param __i6 +/// A 32-bit integral value used to initialize bits [223:192] of the result. +/// \param __i7 +/// A 32-bit integral value used to initialize bits [255:224] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3, + int __i4, int __i5, int __i6, int __i7) +{ + return _mm256_set_epi32(__i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0); +} + +/// Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 16-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w15 +/// A 16-bit integral value used to initialize bits [15:0] of the result. +/// \param __w14 +/// A 16-bit integral value used to initialize bits [31:16] of the result. +/// \param __w13 +/// A 16-bit integral value used to initialize bits [47:32] of the result. +/// \param __w12 +/// A 16-bit integral value used to initialize bits [63:48] of the result. +/// \param __w11 +/// A 16-bit integral value used to initialize bits [79:64] of the result. +/// \param __w10 +/// A 16-bit integral value used to initialize bits [95:80] of the result. +/// \param __w09 +/// A 16-bit integral value used to initialize bits [111:96] of the result. +/// \param __w08 +/// A 16-bit integral value used to initialize bits [127:112] of the result. +/// \param __w07 +/// A 16-bit integral value used to initialize bits [143:128] of the result. +/// \param __w06 +/// A 16-bit integral value used to initialize bits [159:144] of the result. +/// \param __w05 +/// A 16-bit integral value used to initialize bits [175:160] of the result. +/// \param __w04 +/// A 16-bit integral value used to initialize bits [191:176] of the result. +/// \param __w03 +/// A 16-bit integral value used to initialize bits [207:192] of the result. +/// \param __w02 +/// A 16-bit integral value used to initialize bits [223:208] of the result. +/// \param __w01 +/// A 16-bit integral value used to initialize bits [239:224] of the result. +/// \param __w00 +/// A 16-bit integral value used to initialize bits [255:240] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12, + short __w11, short __w10, short __w09, short __w08, + short __w07, short __w06, short __w05, short __w04, + short __w03, short __w02, short __w01, short __w00) +{ + return _mm256_set_epi16(__w00, __w01, __w02, __w03, + __w04, __w05, __w06, __w07, + __w08, __w09, __w10, __w11, + __w12, __w13, __w14, __w15); +} + +/// Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 8-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b31 +/// An 8-bit integral value used to initialize bits [7:0] of the result. +/// \param __b30 +/// An 8-bit integral value used to initialize bits [15:8] of the result. +/// \param __b29 +/// An 8-bit integral value used to initialize bits [23:16] of the result. 
+/// \param __b28 +/// An 8-bit integral value used to initialize bits [31:24] of the result. +/// \param __b27 +/// An 8-bit integral value used to initialize bits [39:32] of the result. +/// \param __b26 +/// An 8-bit integral value used to initialize bits [47:40] of the result. +/// \param __b25 +/// An 8-bit integral value used to initialize bits [55:48] of the result. +/// \param __b24 +/// An 8-bit integral value used to initialize bits [63:56] of the result. +/// \param __b23 +/// An 8-bit integral value used to initialize bits [71:64] of the result. +/// \param __b22 +/// An 8-bit integral value used to initialize bits [79:72] of the result. +/// \param __b21 +/// An 8-bit integral value used to initialize bits [87:80] of the result. +/// \param __b20 +/// An 8-bit integral value used to initialize bits [95:88] of the result. +/// \param __b19 +/// An 8-bit integral value used to initialize bits [103:96] of the result. +/// \param __b18 +/// An 8-bit integral value used to initialize bits [111:104] of the result. +/// \param __b17 +/// An 8-bit integral value used to initialize bits [119:112] of the result. +/// \param __b16 +/// An 8-bit integral value used to initialize bits [127:120] of the result. +/// \param __b15 +/// An 8-bit integral value used to initialize bits [135:128] of the result. +/// \param __b14 +/// An 8-bit integral value used to initialize bits [143:136] of the result. +/// \param __b13 +/// An 8-bit integral value used to initialize bits [151:144] of the result. +/// \param __b12 +/// An 8-bit integral value used to initialize bits [159:152] of the result. +/// \param __b11 +/// An 8-bit integral value used to initialize bits [167:160] of the result. +/// \param __b10 +/// An 8-bit integral value used to initialize bits [175:168] of the result. +/// \param __b09 +/// An 8-bit integral value used to initialize bits [183:176] of the result. +/// \param __b08 +/// An 8-bit integral value used to initialize bits [191:184] of the result. +/// \param __b07 +/// An 8-bit integral value used to initialize bits [199:192] of the result. +/// \param __b06 +/// An 8-bit integral value used to initialize bits [207:200] of the result. +/// \param __b05 +/// An 8-bit integral value used to initialize bits [215:208] of the result. +/// \param __b04 +/// An 8-bit integral value used to initialize bits [223:216] of the result. +/// \param __b03 +/// An 8-bit integral value used to initialize bits [231:224] of the result. +/// \param __b02 +/// An 8-bit integral value used to initialize bits [239:232] of the result. +/// \param __b01 +/// An 8-bit integral value used to initialize bits [247:240] of the result. +/// \param __b00 +/// An 8-bit integral value used to initialize bits [255:248] of the result. +/// \returns An initialized 256-bit integer vector. 
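The _mm256_set_* constructors take their arguments from the most significant element down, while the _mm256_setr_* variants take them in memory (least-significant-first) order, so the two calls below build the same vector. A small stand-alone sanity check (not part of the header), to be compiled with AVX enabled.

#include <immintrin.h>

int main(void)
{
  __m256i a = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);   /* arg order: high -> low  */
  __m256i b = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);  /* arg order: low  -> high */
  int va[8], vb[8];
  _mm256_storeu_si256((__m256i_u *)va, a);
  _mm256_storeu_si256((__m256i_u *)vb, b);
  for (int i = 0; i < 8; i++)
    if (va[i] != i || vb[i] != i)
      return 1;                                           /* mismatch */
  return 0;                                               /* both hold {0,...,7} */
}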
+static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28, + char __b27, char __b26, char __b25, char __b24, + char __b23, char __b22, char __b21, char __b20, + char __b19, char __b18, char __b17, char __b16, + char __b15, char __b14, char __b13, char __b12, + char __b11, char __b10, char __b09, char __b08, + char __b07, char __b06, char __b05, char __b04, + char __b03, char __b02, char __b01, char __b00) +{ + return _mm256_set_epi8(__b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07, + __b08, __b09, __b10, __b11, __b12, __b13, __b14, __b15, + __b16, __b17, __b18, __b19, __b20, __b21, __b22, __b23, + __b24, __b25, __b26, __b27, __b28, __b29, __b30, __b31); +} + +/// Constructs a 256-bit integer vector, initialized in reverse order +/// with the specified 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ+VINSERTF128 +/// instruction. +/// +/// \param __a +/// A 64-bit integral value used to initialize bits [63:0] of the result. +/// \param __b +/// A 64-bit integral value used to initialize bits [127:64] of the result. +/// \param __c +/// A 64-bit integral value used to initialize bits [191:128] of the result. +/// \param __d +/// A 64-bit integral value used to initialize bits [255:192] of the result. +/// \returns An initialized 256-bit integer vector. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d) +{ + return _mm256_set_epi64x(__d, __c, __b, __a); +} + +/* Create vectors with repeated elements */ +/// Constructs a 256-bit floating-point vector of [4 x double], with each +/// of the four double-precision floating-point vector elements set to the +/// specified double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 256-bit floating-point vector of [4 x double]. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_set1_pd(double __w) +{ + return _mm256_set_pd(__w, __w, __w, __w); +} + +/// Constructs a 256-bit floating-point vector of [8 x float], with each +/// of the eight single-precision floating-point vector elements set to the +/// specified single-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 +/// instruction. +/// +/// \param __w +/// A single-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 256-bit floating-point vector of [8 x float]. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_set1_ps(float __w) +{ + return _mm256_set_ps(__w, __w, __w, __w, __w, __w, __w, __w); +} + +/// Constructs a 256-bit integer vector of [8 x i32], with each of the +/// 32-bit integral vector elements set to the specified 32-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS+VINSERTF128 +/// instruction. +/// +/// \param __i +/// A 32-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [8 x i32]. 
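Broadcast constructors such as _mm256_set1_ps above are the usual way to apply one scalar across a whole array. A minimal sketch, assuming a hypothetical scale_ps helper, an element count that is a multiple of 8, and _mm256_mul_ps from earlier in this header.

#include <stddef.h>
#include <immintrin.h>

/* Multiply every element of data[0..n) by factor, 8 floats at a time. */
static void scale_ps(float *data, float factor, size_t n)
{
  __m256 f = _mm256_set1_ps(factor);          /* broadcast the scalar */
  for (size_t i = 0; i < n; i += 8) {
    __m256 v = _mm256_loadu_ps(data + i);     /* unaligned load is fine */
    _mm256_storeu_ps(data + i, _mm256_mul_ps(v, f));
  }
}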
+static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi32(int __i) +{ + return _mm256_set_epi32(__i, __i, __i, __i, __i, __i, __i, __i); +} + +/// Constructs a 256-bit integer vector of [16 x i16], with each of the +/// 16-bit integral vector elements set to the specified 16-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSHUFB+VINSERTF128 instruction. +/// +/// \param __w +/// A 16-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [16 x i16]. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi16(short __w) +{ + return _mm256_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w, + __w, __w, __w, __w, __w, __w, __w, __w); +} + +/// Constructs a 256-bit integer vector of [32 x i8], with each of the +/// 8-bit integral vector elements set to the specified 8-bit integral value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSHUFB+VINSERTF128 instruction. +/// +/// \param __b +/// An 8-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [32 x i8]. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi8(char __b) +{ + return _mm256_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, + __b, __b, __b, __b, __b, __b, __b, __b, + __b, __b, __b, __b, __b, __b, __b, __b, + __b, __b, __b, __b, __b, __b, __b, __b); +} + +/// Constructs a 256-bit integer vector of [4 x i64], with each of the +/// 64-bit integral vector elements set to the specified 64-bit integral +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP+VINSERTF128 instruction. +/// +/// \param __q +/// A 64-bit integral value used to initialize each vector element of the +/// result. +/// \returns An initialized 256-bit integer vector of [4 x i64]. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set1_epi64x(long long __q) +{ + return _mm256_set_epi64x(__q, __q, __q, __q); +} + +/* Create __zeroed vectors */ +/// Constructs a 256-bit floating-point vector of [4 x double] with all +/// vector elements initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \returns A 256-bit vector of [4 x double] with all elements set to zero. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_setzero_pd(void) +{ + return __extension__ (__m256d){ 0.0, 0.0, 0.0, 0.0 }; +} + +/// Constructs a 256-bit floating-point vector of [8 x float] with all +/// vector elements initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \returns A 256-bit vector of [8 x float] with all elements set to zero. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_setzero_ps(void) +{ + return __extension__ (__m256){ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }; +} + +/// Constructs a 256-bit integer vector initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS instruction. +/// +/// \returns A 256-bit integer vector initialized to zero. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setzero_si256(void) +{ + return __extension__ (__m256i)(__v4di){ 0, 0, 0, 0 }; +} + +/* Cast between vector types */ +/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit +/// floating-point vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. 
+/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 256-bit floating-point vector of [8 x float] containing the same +/// bitwise pattern as the parameter. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_castpd_ps(__m256d __a) +{ + return (__m256)__a; +} + +/// Casts a 256-bit floating-point vector of [4 x double] into a 256-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 256-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_castpd_si256(__m256d __a) +{ + return (__m256i)__a; +} + +/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit +/// floating-point vector of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 256-bit floating-point vector of [4 x double] containing the same +/// bitwise pattern as the parameter. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_castps_pd(__m256 __a) +{ + return (__m256d)__a; +} + +/// Casts a 256-bit floating-point vector of [8 x float] into a 256-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +/// \returns A 256-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_castps_si256(__m256 __a) +{ + return (__m256i)__a; +} + +/// Casts a 256-bit integer vector into a 256-bit floating-point vector +/// of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit floating-point vector of [8 x float] containing the same +/// bitwise pattern as the parameter. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_castsi256_ps(__m256i __a) +{ + return (__m256)__a; +} + +/// Casts a 256-bit integer vector into a 256-bit floating-point vector +/// of [4 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 256-bit floating-point vector of [4 x double] containing the same +/// bitwise pattern as the parameter. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_castsi256_pd(__m256i __a) +{ + return (__m256d)__a; +} + +/// Returns the lower 128 bits of a 256-bit floating-point vector of +/// [4 x double] as a 128-bit floating-point vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +/// \returns A 128-bit floating-point vector of [2 x double] containing the +/// lower 128 bits of the parameter. +static __inline __m128d __DEFAULT_FN_ATTRS +_mm256_castpd256_pd128(__m256d __a) +{ + return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1); +} + +/// Returns the lower 128 bits of a 256-bit floating-point vector of +/// [8 x float] as a 128-bit floating-point vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. 
+/// \returns A 128-bit floating-point vector of [4 x float] containing the +/// lower 128 bits of the parameter. +static __inline __m128 __DEFAULT_FN_ATTRS +_mm256_castps256_ps128(__m256 __a) +{ + return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3); +} + +/// Truncates a 256-bit integer vector into a 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 256-bit integer vector. +/// \returns A 128-bit integer vector containing the lower 128 bits of the +/// parameter. +static __inline __m128i __DEFAULT_FN_ATTRS +_mm256_castsi256_si128(__m256i __a) +{ + return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] from a +/// 128-bit floating-point vector of [2 x double]. +/// +/// The lower 128 bits contain the value of the source vector. The contents +/// of the upper 128 bits are undefined. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits +/// contain the value of the parameter. The contents of the upper 128 bits +/// are undefined. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_castpd128_pd256(__m128d __a) +{ + return __builtin_shufflevector( + (__v2df)__a, (__v2df)__builtin_nondeterministic_value(__a), 0, 1, 2, 3); +} + +/// Constructs a 256-bit floating-point vector of [8 x float] from a +/// 128-bit floating-point vector of [4 x float]. +/// +/// The lower 128 bits contain the value of the source vector. The contents +/// of the upper 128 bits are undefined. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits +/// contain the value of the parameter. The contents of the upper 128 bits +/// are undefined. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_castps128_ps256(__m128 __a) +{ + return __builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_nondeterministic_value(__a), + 0, 1, 2, 3, 4, 5, 6, 7); +} + +/// Constructs a 256-bit integer vector from a 128-bit integer vector. +/// +/// The lower 128 bits contain the value of the source vector. The contents +/// of the upper 128 bits are undefined. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 256-bit integer vector. The lower 128 bits contain the value of +/// the parameter. The contents of the upper 128 bits are undefined. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_castsi128_si256(__m128i __a) +{ + return __builtin_shufflevector( + (__v2di)__a, (__v2di)__builtin_nondeterministic_value(__a), 0, 1, 2, 3); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] from a +/// 128-bit floating-point vector of [2 x double]. The lower 128 bits +/// contain the value of the source vector. The upper 128 bits are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits +/// contain the value of the parameter. The upper 128 bits are set to zero. 
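The cast intrinsics above reinterpret bits and emit no instruction; when widening from 128 to 256 bits the upper half is left undefined, whereas the _mm256_zext*128_*256 forms defined just below guarantee zeros in the upper half. A sketch of when each is appropriate (helper names are illustrative).

#include <immintrin.h>

/* Upper 128 bits guaranteed zero: use when the caller may read them. */
static __m256 widen_zero(__m128 x)
{
  return _mm256_zextps128_ps256(x);
}

/* Upper 128 bits undefined: cheaper, fine when they are overwritten anyway. */
static __m256 widen_dont_care(__m128 x)
{
  return _mm256_castps128_ps256(x);
}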
+static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_zextpd128_pd256(__m128d __a) +{ + return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3); +} + +/// Constructs a 256-bit floating-point vector of [8 x float] from a +/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain +/// the value of the source vector. The upper 128 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits +/// contain the value of the parameter. The upper 128 bits are set to zero. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_zextps128_ps256(__m128 __a) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7); +} + +/// Constructs a 256-bit integer vector from a 128-bit integer vector. +/// The lower 128 bits contain the value of the source vector. The upper +/// 128 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 256-bit integer vector. The lower 128 bits contain the value of +/// the parameter. The upper 128 bits are set to zero. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_zextsi128_si256(__m128i __a) +{ + return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3); +} + +/* + Vector insert. + We use macros rather than inlines because we only want to accept + invocations where the immediate M is a constant expression. +*/ +/// Constructs a new 256-bit vector of [8 x float] by first duplicating +/// a 256-bit vector of [8 x float] given in the first parameter, and then +/// replacing either the upper or the lower 128 bits with the contents of a +/// 128-bit vector of [4 x float] in the second parameter. +/// +/// The immediate integer parameter determines between the upper or the lower +/// 128 bits. +/// +/// \headerfile +/// +/// \code +/// __m256 _mm256_insertf128_ps(__m256 V1, __m128 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [8 x float]. This vector is copied to the result +/// first, and then either the upper or the lower 128 bits of the result will +/// be replaced by the contents of \a V2. +/// \param V2 +/// A 128-bit vector of [4 x float]. The contents of this parameter are +/// written to either the upper or the lower 128 bits of the result depending +/// on the value of parameter \a M. +/// \param M +/// An immediate integer. The least significant bit determines how the values +/// from the two parameters are interleaved: \n +/// If bit [0] of \a M is 0, \a V2 are copied to bits [127:0] of the result, +/// and bits [255:128] of \a V1 are copied to bits [255:128] of the +/// result. \n +/// If bit [0] of \a M is 1, \a V2 are copied to bits [255:128] of the +/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the +/// result. +/// \returns A 256-bit vector of [8 x float] containing the interleaved values. 
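A common use of _mm256_insertf128_ps (defined just below) is building a 256-bit vector from two 128-bit halves: cast one half into the low lane, then insert the other into the high lane. A minimal sketch; combine_ps is an illustrative name. This is effectively what _mm256_set_m128, documented further below, produces.

#include <immintrin.h>

/* Concatenate lo (bits [127:0]) and hi (bits [255:128]) into one vector. */
static __m256 combine_ps(__m128 lo, __m128 hi)
{
  __m256 v = _mm256_castps128_ps256(lo);   /* low half set, upper half undefined */
  return _mm256_insertf128_ps(v, hi, 1);   /* overwrite the undefined upper half */
}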
+#define _mm256_insertf128_ps(V1, V2, M) \ + ((__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(V1), \ + (__v4sf)(__m128)(V2), (int)(M))) + +/// Constructs a new 256-bit vector of [4 x double] by first duplicating +/// a 256-bit vector of [4 x double] given in the first parameter, and then +/// replacing either the upper or the lower 128 bits with the contents of a +/// 128-bit vector of [2 x double] in the second parameter. +/// +/// The immediate integer parameter determines between the upper or the lower +/// 128 bits. +/// +/// \headerfile +/// +/// \code +/// __m256d _mm256_insertf128_pd(__m256d V1, __m128d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param V1 +/// A 256-bit vector of [4 x double]. This vector is copied to the result +/// first, and then either the upper or the lower 128 bits of the result will +/// be replaced by the contents of \a V2. +/// \param V2 +/// A 128-bit vector of [2 x double]. The contents of this parameter are +/// written to either the upper or the lower 128 bits of the result depending +/// on the value of parameter \a M. +/// \param M +/// An immediate integer. The least significant bit determines how the values +/// from the two parameters are interleaved: \n +/// If bit [0] of \a M is 0, \a V2 are copied to bits [127:0] of the result, +/// and bits [255:128] of \a V1 are copied to bits [255:128] of the +/// result. \n +/// If bit [0] of \a M is 1, \a V2 are copied to bits [255:128] of the +/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the +/// result. +/// \returns A 256-bit vector of [4 x double] containing the interleaved values. +#define _mm256_insertf128_pd(V1, V2, M) \ + ((__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(V1), \ + (__v2df)(__m128d)(V2), (int)(M))) + +/// Constructs a new 256-bit integer vector by first duplicating a +/// 256-bit integer vector given in the first parameter, and then replacing +/// either the upper or the lower 128 bits with the contents of a 128-bit +/// integer vector in the second parameter. +/// +/// The immediate integer parameter determines between the upper or the lower +/// 128 bits. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_insertf128_si256(__m256i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param V1 +/// A 256-bit integer vector. This vector is copied to the result first, and +/// then either the upper or the lower 128 bits of the result will be +/// replaced by the contents of \a V2. +/// \param V2 +/// A 128-bit integer vector. The contents of this parameter are written to +/// either the upper or the lower 128 bits of the result depending on the +/// value of parameter \a M. +/// \param M +/// An immediate integer. The least significant bit determines how the values +/// from the two parameters are interleaved: \n +/// If bit [0] of \a M is 0, \a V2 are copied to bits [127:0] of the result, +/// and bits [255:128] of \a V1 are copied to bits [255:128] of the +/// result. \n +/// If bit [0] of \a M is 1, \a V2 are copied to bits [255:128] of the +/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the +/// result. +/// \returns A 256-bit integer vector containing the interleaved values. +#define _mm256_insertf128_si256(V1, V2, M) \ + ((__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(V1), \ + (__v4si)(__m128i)(V2), (int)(M))) + +/* + Vector extract. 
+ We use macros rather than inlines because we only want to accept + invocations where the immediate M is a constant expression. +*/ +/// Extracts either the upper or the lower 128 bits from a 256-bit vector +/// of [8 x float], as determined by the immediate integer parameter, and +/// returns the extracted bits as a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm256_extractf128_ps(__m256 V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction. +/// +/// \param V +/// A 256-bit vector of [8 x float]. +/// \param M +/// An immediate integer. The least significant bit determines which bits are +/// extracted from the first parameter: \n +/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the +/// result. \n +/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result. +/// \returns A 128-bit vector of [4 x float] containing the extracted bits. +#define _mm256_extractf128_ps(V, M) \ + ((__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(V), (int)(M))) + +/// Extracts either the upper or the lower 128 bits from a 256-bit vector +/// of [4 x double], as determined by the immediate integer parameter, and +/// returns the extracted bits as a 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm256_extractf128_pd(__m256d V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction. +/// +/// \param V +/// A 256-bit vector of [4 x double]. +/// \param M +/// An immediate integer. The least significant bit determines which bits are +/// extracted from the first parameter: \n +/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the +/// result. \n +/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result. +/// \returns A 128-bit vector of [2 x double] containing the extracted bits. +#define _mm256_extractf128_pd(V, M) \ + ((__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(V), (int)(M))) + +/// Extracts either the upper or the lower 128 bits from a 256-bit +/// integer vector, as determined by the immediate integer parameter, and +/// returns the extracted bits as a 128-bit integer vector. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_extractf128_si256(__m256i V, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction. +/// +/// \param V +/// A 256-bit integer vector. +/// \param M +/// An immediate integer. The least significant bit determines which bits are +/// extracted from the first parameter: \n +/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the +/// result. \n +/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result. +/// \returns A 128-bit integer vector containing the extracted bits. +#define _mm256_extractf128_si256(V, M) \ + ((__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(V), (int)(M))) + +/// Constructs a 256-bit floating-point vector of [8 x float] by +/// concatenating two 128-bit floating-point vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __hi +/// A 128-bit floating-point vector of [4 x float] to be copied to the upper +/// 128 bits of the result. +/// \param __lo +/// A 128-bit floating-point vector of [4 x float] to be copied to the lower +/// 128 bits of the result. 
+/// \returns A 256-bit floating-point vector of [8 x float] containing the +/// concatenated result. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_set_m128 (__m128 __hi, __m128 __lo) +{ + return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] by +/// concatenating two 128-bit floating-point vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __hi +/// A 128-bit floating-point vector of [2 x double] to be copied to the upper +/// 128 bits of the result. +/// \param __lo +/// A 128-bit floating-point vector of [2 x double] to be copied to the lower +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [4 x double] containing the +/// concatenated result. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_set_m128d (__m128d __hi, __m128d __lo) +{ + return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3); +} + +/// Constructs a 256-bit integer vector by concatenating two 128-bit +/// integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __hi +/// A 128-bit integer vector to be copied to the upper 128 bits of the +/// result. +/// \param __lo +/// A 128-bit integer vector to be copied to the lower 128 bits of the +/// result. +/// \returns A 256-bit integer vector containing the concatenated result. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_set_m128i (__m128i __hi, __m128i __lo) +{ + return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3); +} + +/// Constructs a 256-bit floating-point vector of [8 x float] by +/// concatenating two 128-bit floating-point vectors of [4 x float]. This is +/// similar to _mm256_set_m128, but the order of the input parameters is +/// swapped. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __lo +/// A 128-bit floating-point vector of [4 x float] to be copied to the lower +/// 128 bits of the result. +/// \param __hi +/// A 128-bit floating-point vector of [4 x float] to be copied to the upper +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [8 x float] containing the +/// concatenated result. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_setr_m128 (__m128 __lo, __m128 __hi) +{ + return _mm256_set_m128(__hi, __lo); +} + +/// Constructs a 256-bit floating-point vector of [4 x double] by +/// concatenating two 128-bit floating-point vectors of [2 x double]. This is +/// similar to _mm256_set_m128d, but the order of the input parameters is +/// swapped. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __lo +/// A 128-bit floating-point vector of [2 x double] to be copied to the lower +/// 128 bits of the result. +/// \param __hi +/// A 128-bit floating-point vector of [2 x double] to be copied to the upper +/// 128 bits of the result. +/// \returns A 256-bit floating-point vector of [4 x double] containing the +/// concatenated result. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_setr_m128d (__m128d __lo, __m128d __hi) +{ + return (__m256d)_mm256_set_m128d(__hi, __lo); +} + +/// Constructs a 256-bit integer vector by concatenating two 128-bit +/// integer vectors. This is similar to _mm256_set_m128i, but the order of +/// the input parameters is swapped. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VINSERTF128 instruction. +/// +/// \param __lo +/// A 128-bit integer vector to be copied to the lower 128 bits of the +/// result. +/// \param __hi +/// A 128-bit integer vector to be copied to the upper 128 bits of the +/// result. +/// \returns A 256-bit integer vector containing the concatenated result. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_setr_m128i (__m128i __lo, __m128i __hi) +{ + return (__m256i)_mm256_set_m128i(__hi, __lo); +} + +/* SIMD load ops (unaligned) */ +/// Loads two 128-bit floating-point vectors of [4 x float] from +/// unaligned memory locations and constructs a 256-bit floating-point vector +/// of [8 x float] by concatenating the two 128-bit vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to load instructions followed by the +/// VINSERTF128 instruction. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location containing 4 consecutive +/// single-precision floating-point values. These values are to be copied to +/// bits[255:128] of the result. The address of the memory location does not +/// have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location containing 4 consecutive +/// single-precision floating-point values. These values are to be copied to +/// bits[127:0] of the result. The address of the memory location does not +/// have to be aligned. +/// \returns A 256-bit floating-point vector of [8 x float] containing the +/// concatenated result. +static __inline __m256 __DEFAULT_FN_ATTRS +_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo) +{ + return _mm256_set_m128(_mm_loadu_ps(__addr_hi), _mm_loadu_ps(__addr_lo)); +} + +/// Loads two 128-bit floating-point vectors of [2 x double] from +/// unaligned memory locations and constructs a 256-bit floating-point vector +/// of [4 x double] by concatenating the two 128-bit vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to load instructions followed by the +/// VINSERTF128 instruction. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location containing two consecutive +/// double-precision floating-point values. These values are to be copied to +/// bits[255:128] of the result. The address of the memory location does not +/// have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location containing two consecutive +/// double-precision floating-point values. These values are to be copied to +/// bits[127:0] of the result. The address of the memory location does not +/// have to be aligned. +/// \returns A 256-bit floating-point vector of [4 x double] containing the +/// concatenated result. +static __inline __m256d __DEFAULT_FN_ATTRS +_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo) +{ + return _mm256_set_m128d(_mm_loadu_pd(__addr_hi), _mm_loadu_pd(__addr_lo)); +} + +/// Loads two 128-bit integer vectors from unaligned memory locations and +/// constructs a 256-bit integer vector by concatenating the two 128-bit +/// vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to load instructions followed by the +/// VINSERTF128 instruction. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location containing a 128-bit integer +/// vector. This vector is to be copied to bits[255:128] of the result. The +/// address of the memory location does not have to be aligned. 
+/// \param __addr_lo +/// A pointer to a 128-bit memory location containing a 128-bit integer +/// vector. This vector is to be copied to bits[127:0] of the result. The +/// address of the memory location does not have to be aligned. +/// \returns A 256-bit integer vector containing the concatenated result. +static __inline __m256i __DEFAULT_FN_ATTRS +_mm256_loadu2_m128i(__m128i_u const *__addr_hi, __m128i_u const *__addr_lo) +{ + return _mm256_set_m128i(_mm_loadu_si128(__addr_hi), _mm_loadu_si128(__addr_lo)); +} + +/* SIMD store ops (unaligned) */ +/// Stores the upper and lower 128 bits of a 256-bit floating-point +/// vector of [8 x float] into two different unaligned memory locations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction and the +/// store instructions. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __a +/// A 256-bit floating-point vector of [8 x float]. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a) +{ + __m128 __v128; + + __v128 = _mm256_castps256_ps128(__a); + _mm_storeu_ps(__addr_lo, __v128); + __v128 = _mm256_extractf128_ps(__a, 1); + _mm_storeu_ps(__addr_hi, __v128); +} + +/// Stores the upper and lower 128 bits of a 256-bit floating-point +/// vector of [4 x double] into two different unaligned memory locations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction and the +/// store instructions. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __a +/// A 256-bit floating-point vector of [4 x double]. +static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a) +{ + __m128d __v128; + + __v128 = _mm256_castpd256_pd128(__a); + _mm_storeu_pd(__addr_lo, __v128); + __v128 = _mm256_extractf128_pd(__a, 1); + _mm_storeu_pd(__addr_hi, __v128); +} + +/// Stores the upper and lower 128 bits of a 256-bit integer vector into +/// two different unaligned memory locations. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VEXTRACTF128 instruction and the +/// store instructions. +/// +/// \param __addr_hi +/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __addr_lo +/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be +/// copied to this memory location. The address of this memory location does +/// not have to be aligned. +/// \param __a +/// A 256-bit integer vector. 
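+///
+/// A minimal round-trip sketch (the buffer names are placeholders; each
+/// buffer is assumed to be at least 16 bytes):
+/// \code
+///   unsigned char lo_buf[16], hi_buf[16];
+///   __m256i v = _mm256_set1_epi32(7);
+///   _mm256_storeu2_m128i((__m128i_u *)hi_buf, (__m128i_u *)lo_buf, v);
+///   __m256i w = _mm256_loadu2_m128i((const __m128i_u *)hi_buf,
+///                                   (const __m128i_u *)lo_buf);
+///   // w now holds the same 256 bits as v
+/// \endcode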
+static __inline void __DEFAULT_FN_ATTRS +_mm256_storeu2_m128i(__m128i_u *__addr_hi, __m128i_u *__addr_lo, __m256i __a) +{ + __m128i __v128; + + __v128 = _mm256_castsi256_si128(__a); + _mm_storeu_si128(__addr_lo, __v128); + __v128 = _mm256_extractf128_si256(__a, 1); + _mm_storeu_si128(__addr_hi, __v128); +} + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS128 + +#endif /* __AVXINTRIN_H */ diff --git a/third_party/intel/clang/avxneconvertintrin.h b/third_party/intel/clang/avxneconvertintrin.h new file mode 100644 index 000000000..1bef1c893 --- /dev/null +++ b/third_party/intel/clang/avxneconvertintrin.h @@ -0,0 +1,484 @@ +/*===-------------- avxneconvertintrin.h - AVXNECONVERT --------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error \ + "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifdef __SSE2__ + +#ifndef __AVXNECONVERTINTRIN_H +#define __AVXNECONVERTINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxneconvert"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxneconvert"), \ + __min_vector_width__(256))) + +/// Convert scalar BF16 (16-bit) floating-point element +/// stored at memory locations starting at location \a __A to a +/// single-precision (32-bit) floating-point, broadcast it to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_bcstnebf16_ps(const void *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VBCSTNEBF162PS instruction. +/// +/// \param __A +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// b := Convert_BF16_To_FP32(MEM[__A+15:__A]) +/// FOR j := 0 to 3 +/// m := j*32 +/// dst[m+31:m] := b +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_bcstnebf16_ps(const void *__A) { + return (__m128)__builtin_ia32_vbcstnebf162ps128((const __bf16 *)__A); +} + +/// Convert scalar BF16 (16-bit) floating-point element +/// stored at memory locations starting at location \a __A to a +/// single-precision (32-bit) floating-point, broadcast it to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_bcstnebf16_ps(const void *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VBCSTNEBF162PS instruction. +/// +/// \param __A +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns +/// A 256-bit vector of [8 x float]. 
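+///
+/// A minimal usage sketch (0x3F80 is the BF16 encoding of 1.0f; the variable
+/// names are placeholders):
+/// \code
+///   unsigned short one_bf16 = 0x3F80;
+///   __m256 v = _mm256_bcstnebf16_ps(&one_bf16);   // all eight lanes hold 1.0f
+/// \endcode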
+/// +/// \code{.operation} +/// b := Convert_BF16_To_FP32(MEM[__A+15:__A]) +/// FOR j := 0 to 7 +/// m := j*32 +/// dst[m+31:m] := b +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_bcstnebf16_ps(const void *__A) { + return (__m256)__builtin_ia32_vbcstnebf162ps256((const __bf16 *)__A); +} + +/// Convert scalar half-precision (16-bit) floating-point element +/// stored at memory locations starting at location \a __A to a +/// single-precision (32-bit) floating-point, broadcast it to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_bcstnesh_ps(const void *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VBCSTNESH2PS instruction. +/// +/// \param __A +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// b := Convert_FP16_To_FP32(MEM[__A+15:__A]) +/// FOR j := 0 to 3 +/// m := j*32 +/// dst[m+31:m] := b +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_bcstnesh_ps(const void *__A) { + return (__m128)__builtin_ia32_vbcstnesh2ps128((const _Float16 *)__A); +} + +/// Convert scalar half-precision (16-bit) floating-point element +/// stored at memory locations starting at location \a __A to a +/// single-precision (32-bit) floating-point, broadcast it to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_bcstnesh_ps(const void *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VBCSTNESH2PS instruction. +/// +/// \param __A +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// b := Convert_FP16_To_FP32(MEM[__A+15:__A]) +/// FOR j := 0 to 7 +/// m := j*32 +/// dst[m+31:m] := b +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_bcstnesh_ps(const void *__A) { + return (__m256)__builtin_ia32_vbcstnesh2ps256((const _Float16 *)__A); +} + +/// Convert packed BF16 (16-bit) floating-point even-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_cvtneebf16_ps(const __m128bh *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEEBF162PS instruction. +/// +/// \param __A +/// A pointer to a 128-bit memory location containing 8 consecutive +/// BF16 (16-bit) floating-point values. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// k := j*2 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtneebf16_ps(const __m128bh *__A) { + return (__m128)__builtin_ia32_vcvtneebf162ps128((const __v8bf *)__A); +} + +/// Convert packed BF16 (16-bit) floating-point even-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. 
+/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneebf16_ps(const __m256bh *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEEBF162PS instruction. +/// +/// \param __A +/// A pointer to a 256-bit memory location containing 16 consecutive +/// BF16 (16-bit) floating-point values. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// k := j*2 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtneebf16_ps(const __m256bh *__A) { + return (__m256)__builtin_ia32_vcvtneebf162ps256((const __v16bf *)__A); +} + +/// Convert packed half-precision (16-bit) floating-point even-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_cvtneeph_ps(const __m128h *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEEPH2PS instruction. +/// +/// \param __A +/// A pointer to a 128-bit memory location containing 8 consecutive +/// half-precision (16-bit) floating-point values. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// k := j*2 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtneeph_ps(const __m128h *__A) { + return (__m128)__builtin_ia32_vcvtneeph2ps128((const __v8hf *)__A); +} + +/// Convert packed half-precision (16-bit) floating-point even-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneeph_ps(const __m256h *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEEPH2PS instruction. +/// +/// \param __A +/// A pointer to a 256-bit memory location containing 16 consecutive +/// half-precision (16-bit) floating-point values. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// k := j*2 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtneeph_ps(const __m256h *__A) { + return (__m256)__builtin_ia32_vcvtneeph2ps256((const __v16hf *)__A); +} + +/// Convert packed BF16 (16-bit) floating-point odd-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_cvtneobf16_ps(const __m128bh *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEOBF162PS instruction. +/// +/// \param __A +/// A pointer to a 128-bit memory location containing 8 consecutive +/// BF16 (16-bit) floating-point values. +/// \returns +/// A 128-bit vector of [4 x float]. 
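+///
+/// A sketch pairing the even- and odd-element conversions to expand eight
+/// packed BF16 values (the buffer is hypothetical; 0x3F80, 0x4000, ... are
+/// the BF16 encodings of 1.0f, 2.0f, ...):
+/// \code
+///   unsigned short buf[8] = {0x3F80, 0x4000, 0x4040, 0x4080,
+///                            0x40A0, 0x40C0, 0x40E0, 0x4100};   // 1..8
+///   __m128 evens = _mm_cvtneebf16_ps((const __m128bh *)buf);    // {1, 3, 5, 7}
+///   __m128 odds  = _mm_cvtneobf16_ps((const __m128bh *)buf);    // {2, 4, 6, 8}
+/// \endcode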
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// k := j*2+1 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtneobf16_ps(const __m128bh *__A) { + return (__m128)__builtin_ia32_vcvtneobf162ps128((const __v8bf *)__A); +} + +/// Convert packed BF16 (16-bit) floating-point odd-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneobf16_ps(const __m256bh *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEOBF162PS instruction. +/// +/// \param __A +/// A pointer to a 256-bit memory location containing 16 consecutive +/// BF16 (16-bit) floating-point values. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// k := j*2+1 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtneobf16_ps(const __m256bh *__A) { + return (__m256)__builtin_ia32_vcvtneobf162ps256((const __v16bf *)__A); +} + +/// Convert packed half-precision (16-bit) floating-point odd-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_cvtneoph_ps(const __m128h *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEOPH2PS instruction. +/// +/// \param __A +/// A pointer to a 128-bit memory location containing 8 consecutive +/// half-precision (16-bit) floating-point values. +/// \returns +/// A 128-bit vector of [4 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// k := j*2+1 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtneoph_ps(const __m128h *__A) { + return (__m128)__builtin_ia32_vcvtneoph2ps128((const __v8hf *)__A); +} + +/// Convert packed half-precision (16-bit) floating-point odd-indexed elements +/// stored at memory locations starting at location \a __A to packed +/// single-precision (32-bit) floating-point elements, and store the results in +/// \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneoph_ps(const __m256h *__A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEOPH2PS instruction. +/// +/// \param __A +/// A pointer to a 256-bit memory location containing 16 consecutive +/// half-precision (16-bit) floating-point values. +/// \returns +/// A 256-bit vector of [8 x float]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// k := j*2+1 +/// i := k*16 +/// m := j*32 +/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i]) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtneoph_ps(const __m256h *__A) { + return (__m256)__builtin_ia32_vcvtneoph2ps256((const __v16hf *)__A); +} + +/// Convert packed single-precision (32-bit) floating-point elements in \a __A +/// to packed BF16 (16-bit) floating-point elements, and store the results in \a +/// dst. 
+/// +/// \headerfile +/// +/// \code +/// _mm_cvtneps_avx_pbh(__m128 __A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEPS2BF16 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float]. +/// \returns +/// A 128-bit vector of [8 x bfloat]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128bh __DEFAULT_FN_ATTRS128 +_mm_cvtneps_avx_pbh(__m128 __A) { + return (__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)__A); +} + +/// Convert packed single-precision (32-bit) floating-point elements in \a __A +/// to packed BF16 (16-bit) floating-point elements, and store the results in \a +/// dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_cvtneps_avx_pbh(__m256 __A); +/// \endcode +/// +/// This intrinsic corresponds to the \c VCVTNEPS2BF16 instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float]. +/// \returns +/// A 128-bit vector of [8 x bfloat]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// dst.word[j] := Convert_FP32_To_BF16(a.fp32[j]) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128bh __DEFAULT_FN_ATTRS256 +_mm256_cvtneps_avx_pbh(__m256 __A) { + return (__m128bh)__builtin_ia32_vcvtneps2bf16256((__v8sf)__A); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif // __AVXNECONVERTINTRIN_H +#endif // __SSE2__ diff --git a/third_party/intel/clang/avxvnniint16intrin.h b/third_party/intel/clang/avxvnniint16intrin.h new file mode 100644 index 000000000..e4d342a8b --- /dev/null +++ b/third_party/intel/clang/avxvnniint16intrin.h @@ -0,0 +1,473 @@ +/*===----------- avxvnniint16intrin.h - AVXVNNIINT16 intrinsics-------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error \ + "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifndef __AVXVNNIINT16INTRIN_H +#define __AVXVNNIINT16INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \ + __min_vector_width__(256))) + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwsud_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUD instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x int]. +/// \param __A +/// A 128-bit vector of [8 x short]. +/// \param __B +/// A 128-bit vector of [8 x unsigned short]. +/// \returns +/// A 128-bit vector of [4 x int]. 
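+///
+/// A minimal accumulation sketch (values chosen for illustration):
+/// \code
+///   __m128i acc = _mm_setzero_si128();
+///   __m128i a   = _mm_set1_epi16(-3);     // signed 16-bit elements
+///   __m128i b   = _mm_set1_epi16(2);      // interpreted as unsigned 16-bit
+///   acc = _mm_dpwsud_epi32(acc, a, b);    // each 32-bit lane: (-3*2) + (-3*2) = -12
+/// \endcode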
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsud_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwsud128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUD instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x int]. +/// \param __A +/// A 256-bit vector of [16 x short]. +/// \param __B +/// A 256-bit vector of [16 x unsigned short]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwsud256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x int]. +/// \param __A +/// A 128-bit vector of [8 x short]. +/// \param __B +/// A 128-bit vector of [8 x unsigned short]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsuds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwsuds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. 
+/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x int]. +/// \param __A +/// A 256-bit vector of [16 x short]. +/// \param __B +/// A 256-bit vector of [16 x unsigned short]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwsuds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpbusd_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWUSD instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x int]. +/// \param __A +/// A 128-bit vector of [8 x unsigned short]. +/// \param __B +/// A 128-bit vector of [8 x short]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusd_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwusd128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWUSD instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x int]. +/// \param __A +/// A 256-bit vector of [16 x unsigned short]. +/// \param __B +/// A 256-bit vector of [16 x short]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwusd256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwusds_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x int]. +/// \param __A +/// A 128-bit vector of [8 x unsigned short]. +/// \param __B +/// A 128-bit vector of [8 x short]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwusds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x int]. +/// \param __A +/// A 256-bit vector of [16 x unsigned short]. +/// \param __B +/// A 256-bit vector of [16 x short]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwusds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. 
+/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwuud_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWUUD instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x unsigned int]. +/// \param __A +/// A 128-bit vector of [8 x unsigned short]. +/// \param __B +/// A 128-bit vector of [8 x unsigned short]. +/// \returns +/// A 128-bit vector of [4 x unsigned int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuud_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwuud128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWUUD instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x unsigned int]. +/// \param __A +/// A 256-bit vector of [16 x unsigned short]. +/// \param __B +/// A 256-bit vector of [16 x unsigned short]. +/// \returns +/// A 256-bit vector of [8 x unsigned int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwuud256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 128-bit vector of [4 x unsigned int]. +/// \param __A +/// A 128-bit vector of [8 x unsigned short]. +/// \param __B +/// A 128-bit vector of [8 x unsigned short]. +/// \returns +/// A 128-bit vector of [4 x unsigned int]. 
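+///
+/// A sketch showing the unsigned saturation (constants chosen so the sum
+/// exceeds the 32-bit range):
+/// \code
+///   __m128i acc = _mm_set1_epi32((int)0xFFFFFFF0u);   // 4294967280
+///   __m128i a   = _mm_set1_epi16((short)0xFFFFu);     // 65535, unsigned
+///   __m128i b   = _mm_set1_epi16(1);
+///   acc = _mm_dpwuuds_epi32(acc, a, b);  // 4294967280 + 2*65535 saturates to 0xFFFFFFFF
+/// \endcode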
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuuds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpwuuds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with +/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate +/// signed 16-bit results. Sum these 2 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPWSUDS instruction. +/// +/// \param __W +/// A 256-bit vector of [8 x unsigned int]. +/// \param __A +/// A 256-bit vector of [16 x unsigned short]. +/// \param __B +/// A 256-bit vector of [16 x unsigned short]. +/// \returns +/// A 256-bit vector of [8 x unsigned int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j]) +/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1]) +/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpwuuds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif // __AVXVNNIINT16INTRIN_H diff --git a/third_party/intel/clang/avxvnniint8intrin.h b/third_party/intel/clang/avxvnniint8intrin.h new file mode 100644 index 000000000..b0b6cb853 --- /dev/null +++ b/third_party/intel/clang/avxvnniint8intrin.h @@ -0,0 +1,471 @@ +/*===-------- avxvnniint8intrin.h - AVXVNNIINT8 intrinsics -----------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error \ + "Never use directly; include instead." +#endif + +#ifndef __AVXVNNIINT8INTRIN_H +#define __AVXVNNIINT8INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint8"), \ + __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint8"), \ + __min_vector_width__(128))) + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbssd_epi32(__m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. 
+/// +/// \param __A +/// A 128-bit vector of [16 x char]. +/// \param __B +/// A 128-bit vector of [16 x char]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) +/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) +/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) +/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbssd_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbssd128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x char]. +/// \param __B +/// A 256-bit vector of [32 x char]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) +/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) +/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) +/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbssd256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbssds_epi32( __m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x char]. +/// \param __B +/// A 128-bit vector of [16 x char]. +/// \returns +/// A 128-bit vector of [4 x int]. 
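+///
+/// A sketch showing the signed saturation (constants chosen so the sum
+/// exceeds INT32_MAX):
+/// \code
+///   __m128i acc = _mm_set1_epi32(0x7FFFFFF0);   // near INT32_MAX
+///   __m128i a   = _mm_set1_epi8(127);
+///   acc = _mm_dpbssds_epi32(acc, a, a);  // 0x7FFFFFF0 + 4*127*127 saturates to 0x7FFFFFFF
+/// \endcode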
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) +/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) +/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) +/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbssds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbssds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x char]. +/// \param __B +/// A 256-bit vector of [32 x char]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]) +/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]) +/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2]) +/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbssds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbsud_epi32(__m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x char]. +/// \param __B +/// A 128-bit vector of [16 x unsigned char]. +/// \returns +/// A 128-bit vector of [4 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbsud_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbsud128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x char]. +/// \param __B +/// A 256-bit vector of [32 x unsigned char]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbsud256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbsuds_epi32( __m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x char]. +/// \param __B +/// A 128-bit vector of [16 x unsigned char]. +/// \returns +/// A 128-bit vector of [4 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbsuds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbsuds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x char]. +/// \param __B +/// A 256-bit vector of [32 x unsigned char]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])) +/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbsuds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbuud_epi32(__m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x unsigned char]. +/// \param __B +/// A 128-bit vector of [16 x unsigned char]. +/// \returns +/// A 128-bit vector of [4 x int]. 
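+///
+/// A minimal byte dot-product sketch (values chosen for illustration):
+/// \code
+///   __m128i acc  = _mm_setzero_si128();
+///   __m128i a    = _mm_set1_epi8(10);      // unsigned bytes
+///   __m128i ones = _mm_set1_epi8(1);
+///   acc = _mm_dpbuud_epi32(acc, a, ones);  // each 32-bit lane: 4 * (10*1) = 40
+/// \endcode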
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) +/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) +/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) +/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbuud_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbuud128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBSSD instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x unsigned char]. +/// \param __B +/// A 256-bit vector of [32 x unsigned char]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) +/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) +/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) +/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbuud256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm_dpbuuds_epi32( __m128i __W, __m128i __A, __m128i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBUUDS instruction. +/// +/// \param __A +/// A 128-bit vector of [16 x unsigned char]. +/// \param __B +/// A 128-bit vector of [16 x unsigned char]. +/// \returns +/// A 128-bit vector of [4 x int]. 
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) +/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) +/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) +/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbuuds_epi32(__m128i __W, + __m128i __A, + __m128i __B) { + return (__m128i)__builtin_ia32_vpdpbuuds128((__v4si)__W, (__v4si)__A, + (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with +/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate +/// signed 16-bit results. Sum these 4 results with the corresponding +/// 32-bit integer in \a __W with signed saturation, and store the packed +/// 32-bit results in \a dst. +/// +/// \headerfile +/// +/// \code +/// _mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B); +/// \endcode +/// +/// This intrinsic corresponds to the \c VPDPBUUDS instruction. +/// +/// \param __A +/// A 256-bit vector of [32 x unsigned char]. +/// \param __B +/// A 256-bit vector of [32 x unsigned char]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]) +/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]) +/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]) +/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]) +/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vpdpbuuds256((__v8si)__W, (__v8si)__A, + (__v8si)__B); +} +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif // __AVXVNNIINT8INTRIN_H diff --git a/third_party/intel/clang/avxvnniintrin.h b/third_party/intel/clang/avxvnniintrin.h new file mode 100644 index 000000000..b7de562b5 --- /dev/null +++ b/third_party/intel/clang/avxvnniintrin.h @@ -0,0 +1,225 @@ +/*===--------------- avxvnniintrin.h - VNNI intrinsics --------------------=== + * + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __AVXVNNIINTRIN_H +#define __AVXVNNIINTRIN_H + +/* Below intrinsics defined in avx512vlvnniintrin.h can be used for AVXVNNI */ +/// \fn __m256i _mm256_dpbusd_epi32(__m256i __S, __m256i __A, __m256i __B) +/// \fn __m256i _mm256_dpbusds_epi32(__m256i __S, __m256i __A, __m256i __B) +/// \fn __m256i _mm256_dpwssd_epi32(__m256i __S, __m256i __A, __m256i __B) +/// \fn __m256i _mm256_dpwssds_epi32(__m256i __S, __m256i __A, __m256i __B) +/// \fn __m128i _mm_dpbusd_epi32(__m128i __S, __m128i __A, __m128i __B) +/// \fn __m128i _mm_dpbusds_epi32(__m128i __S, __m128i __A, __m128i __B) +/// \fn __m128i _mm_dpwssd_epi32(__m128i __S, __m128i __A, __m128i __B) +/// \fn __m128i _mm_dpwssds_epi32(__m128i __S, __m128i __A, __m128i __B) + +/* Intrinsics with _avx_ prefix are for compatibility with msvc. */ +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avxvnni"), __min_vector_width__(256))) +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avxvnni"), __min_vector_width__(128))) + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a __S, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSD instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])) +/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpdpbusd256((__v8si)__S, (__v8si)__A, (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a __S using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSDS instructions. 
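/* Illustrative sketch (hypothetical usage, not defined by this header): the
   u8 x s8 form above is the workhorse of quantized integer dot products.
   Assumes AVX-VNNI is enabled for this translation unit (e.g. -mavxvnni). */
#include <immintrin.h>
#include <stdint.h>

/* Each 32-bit lane of acc gains the sum of its four adjacent a[k]*b[k] products. */
static inline __m256i dpbusd_accumulate(__m256i acc, const uint8_t *a,
                                        const int8_t *b) {
  __m256i va = _mm256_loadu_si256((const __m256i *)a); /* 32 unsigned bytes */
  __m256i vb = _mm256_loadu_si256((const __m256i *)b); /* 32 signed bytes   */
  return _mm256_dpbusd_avx_epi32(acc, va, vb);
}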
+/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])) +/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpdpbusds256((__v8si)__S, (__v8si)__A, (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S, +/// and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSD instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpdpwssd256((__v8si)__S, (__v8si)__A, (__v8si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S +/// using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSDS instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 7 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// DST[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B) +{ + return (__m256i)__builtin_ia32_vpdpwssds256((__v8si)__S, (__v8si)__A, (__v8si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a __S, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSD instructions. 
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])) +/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4 +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpdpbusd128((__v4si)__S, (__v4si)__A, (__v4si)__B); +} + +/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with +/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate signed +/// 16-bit results. Sum these 4 results with the corresponding 32-bit integer +/// in \a __S using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPBUSDS instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])) +/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])) +/// tmp3.word := Signed(ZeroExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])) +/// tmp4.word := Signed(ZeroExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])) +/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4) +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpdpbusds128((__v4si)__S, (__v4si)__A, (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S, +/// and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSD instructions. +/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpdpwssd128((__v4si)__S, (__v4si)__A, (__v4si)__B); +} + +/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with +/// corresponding 16-bit integers in \a __B, producing 2 intermediate signed 32-bit +/// results. Sum these 2 results with the corresponding 32-bit integer in \a __S +/// using signed saturation, and store the packed 32-bit results in DST. +/// +/// This intrinsic corresponds to the VPDPWSSDS instructions. 
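/* Illustrative sketch (hypothetical helper): the 16-bit VPDPWSSD form, which
   folds two signed words per 32-bit lane into an accumulator. Assumes
   AVX-VNNI is enabled (e.g. -mavxvnni). */
#include <immintrin.h>
#include <stdint.h>

/* acc.dword[j] += a[2j]*b[2j] + a[2j+1]*b[2j+1], all in signed arithmetic. */
static inline __m128i dpwssd_accumulate(__m128i acc, const int16_t a[8],
                                        const int16_t b[8]) {
  __m128i va = _mm_loadu_si128((const __m128i *)a);
  __m128i vb = _mm_loadu_si128((const __m128i *)b);
  return _mm_dpwssd_avx_epi32(acc, va, vb);
}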
+/// +/// \code{.operation} +/// FOR j := 0 to 3 +/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j]) +/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1]) +/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2) +/// ENDFOR +/// DST[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_dpwssds_avx_epi32(__m128i __S, __m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpdpwssds128((__v4si)__S, (__v4si)__A, (__v4si)__B); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif // __AVXVNNIINTRIN_H diff --git a/third_party/intel/clang/bmi2intrin.h b/third_party/intel/clang/bmi2intrin.h new file mode 100644 index 000000000..f0a3343be --- /dev/null +++ b/third_party/intel/clang/bmi2intrin.h @@ -0,0 +1,255 @@ +/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __BMI2INTRIN_H +#define __BMI2INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2"))) + +/// Copies the unsigned 32-bit integer \a __X and zeroes the upper bits +/// starting at bit number \a __Y. +/// +/// \code{.operation} +/// i := __Y[7:0] +/// result := __X +/// IF i < 32 +/// result[31:i] := 0 +/// FI +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BZHI instruction. +/// +/// \param __X +/// The 32-bit source value to copy. +/// \param __Y +/// The lower 8 bits specify the bit number of the lowest bit to zero. +/// \returns The partially zeroed 32-bit value. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_bzhi_u32(unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_bzhi_si(__X, __Y); +} + +/// Deposit (scatter) low-order bits from the unsigned 32-bit integer \a __X +/// into the 32-bit result, according to the mask in the unsigned 32-bit +/// integer \a __Y. All other bits of the result are zero. +/// +/// \code{.operation} +/// i := 0 +/// result := 0 +/// FOR m := 0 TO 31 +/// IF __Y[m] == 1 +/// result[m] := __X[i] +/// i := i + 1 +/// ENDIF +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PDEP instruction. +/// +/// \param __X +/// The 32-bit source value to copy. +/// \param __Y +/// The 32-bit mask specifying where to deposit source bits. +/// \returns The 32-bit result. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_pdep_u32(unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_pdep_si(__X, __Y); +} + +/// Extract (gather) bits from the unsigned 32-bit integer \a __X into the +/// low-order bits of the 32-bit result, according to the mask in the +/// unsigned 32-bit integer \a __Y. All other bits of the result are zero. +/// +/// \code{.operation} +/// i := 0 +/// result := 0 +/// FOR m := 0 TO 31 +/// IF __Y[m] == 1 +/// result[i] := __X[m] +/// i := i + 1 +/// ENDIF +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PEXT instruction. +/// +/// \param __X +/// The 32-bit source value to copy. 
+/// \param __Y +/// The 32-bit mask specifying which source bits to extract. +/// \returns The 32-bit result. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_pext_u32(unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_pext_si(__X, __Y); +} + +/// Multiplies the unsigned 32-bit integers \a __X and \a __Y to form a +/// 64-bit product. Stores the upper 32 bits of the product in the +/// memory at \a __P and returns the lower 32 bits. +/// +/// \code{.operation} +/// Store32(__P, (__X * __Y)[63:32]) +/// result := (__X * __Y)[31:0] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MULX instruction. +/// +/// \param __X +/// An unsigned 32-bit multiplicand. +/// \param __Y +/// An unsigned 32-bit multiplicand. +/// \param __P +/// A pointer to memory for storing the upper half of the product. +/// \returns The lower half of the product. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P) +{ + unsigned long long __res = (unsigned long long) __X * __Y; + *__P = (unsigned int)(__res >> 32); + return (unsigned int)__res; +} + +#ifdef __x86_64__ + +/// Copies the unsigned 64-bit integer \a __X and zeroes the upper bits +/// starting at bit number \a __Y. +/// +/// \code{.operation} +/// i := __Y[7:0] +/// result := __X +/// IF i < 64 +/// result[63:i] := 0 +/// FI +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BZHI instruction. +/// +/// \param __X +/// The 64-bit source value to copy. +/// \param __Y +/// The lower 8 bits specify the bit number of the lowest bit to zero. +/// \returns The partially zeroed 64-bit value. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_bzhi_u64(unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_bzhi_di(__X, __Y); +} + +/// Deposit (scatter) low-order bits from the unsigned 64-bit integer \a __X +/// into the 64-bit result, according to the mask in the unsigned 64-bit +/// integer \a __Y. All other bits of the result are zero. +/// +/// \code{.operation} +/// i := 0 +/// result := 0 +/// FOR m := 0 TO 63 +/// IF __Y[m] == 1 +/// result[m] := __X[i] +/// i := i + 1 +/// ENDIF +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PDEP instruction. +/// +/// \param __X +/// The 64-bit source value to copy. +/// \param __Y +/// The 64-bit mask specifying where to deposit source bits. +/// \returns The 64-bit result. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_pdep_u64(unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_pdep_di(__X, __Y); +} + +/// Extract (gather) bits from the unsigned 64-bit integer \a __X into the +/// low-order bits of the 64-bit result, according to the mask in the +/// unsigned 64-bit integer \a __Y. All other bits of the result are zero. +/// +/// \code{.operation} +/// i := 0 +/// result := 0 +/// FOR m := 0 TO 63 +/// IF __Y[m] == 1 +/// result[i] := __X[m] +/// i := i + 1 +/// ENDIF +/// ENDFOR +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PEXT instruction. +/// +/// \param __X +/// The 64-bit source value to copy. +/// \param __Y +/// The 64-bit mask specifying which source bits to extract. +/// \returns The 64-bit result. 
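/* Illustrative sketch (hypothetical helpers): PEXT gathers the bits selected
   by a mask into the low end of the result, and PDEP scatters low-order bits
   back into those positions, so _pdep_u32(_pext_u32(x, m), m) == (x & m).
   Assumes BMI2 is enabled (e.g. -mbmi2). */
#include <immintrin.h>

#define EVEN_BITS 0x55555555u

static inline unsigned gather_even_bits(unsigned x) {
  return _pext_u32(x, EVEN_BITS); /* bits 0,2,4,... packed into bits 0,1,2,... */
}

static inline unsigned scatter_even_bits(unsigned x) {
  return _pdep_u32(x, EVEN_BITS); /* bits 0,1,2,... spread back to 0,2,4,... */
}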
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_pext_u64(unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_pext_di(__X, __Y); +} + +/// Multiplies the unsigned 64-bit integers \a __X and \a __Y to form a +/// 128-bit product. Stores the upper 64 bits of the product to the +/// memory addressed by \a __P and returns the lower 64 bits. +/// +/// \code{.operation} +/// Store64(__P, (__X * __Y)[127:64]) +/// result := (__X * __Y)[63:0] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MULX instruction. +/// +/// \param __X +/// An unsigned 64-bit multiplicand. +/// \param __Y +/// An unsigned 64-bit multiplicand. +/// \param __P +/// A pointer to memory for storing the upper half of the product. +/// \returns The lower half of the product. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mulx_u64 (unsigned long long __X, unsigned long long __Y, + unsigned long long *__P) +{ + unsigned __int128 __res = (unsigned __int128) __X * __Y; + *__P = (unsigned long long) (__res >> 64); + return (unsigned long long) __res; +} + +#endif /* __x86_64__ */ + +#undef __DEFAULT_FN_ATTRS + +#endif /* __BMI2INTRIN_H */ diff --git a/third_party/intel/clang/bmiintrin.h b/third_party/intel/clang/bmiintrin.h new file mode 100644 index 000000000..78bffe68e --- /dev/null +++ b/third_party/intel/clang/bmiintrin.h @@ -0,0 +1,614 @@ +/*===---- bmiintrin.h - BMI intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __BMIINTRIN_H +#define __BMIINTRIN_H + +/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT + instruction behaves as BSF on non-BMI targets, there is code that expects + to use it as a potentially faster version of BSF. */ +#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) + +/// Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c TZCNT instruction. +/// +/// \param __X +/// An unsigned 16-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 16-bit integer containing the number of trailing zero +/// bits in the operand. +/// \see _tzcnt_u16 +static __inline__ unsigned short __RELAXED_FN_ATTRS +__tzcnt_u16(unsigned short __X) +{ + return __builtin_ia32_tzcnt_u16(__X); +} + +/// Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// \code +/// unsigned short _tzcnt_u16(unsigned short __X); +/// \endcode +/// +/// This intrinsic corresponds to the \c TZCNT instruction. +/// +/// \param __X +/// An unsigned 16-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 16-bit integer containing the number of trailing zero +/// bits in the operand. +/// \see __tzcnt_u16 +#define _tzcnt_u16 __tzcnt_u16 + +/// Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c TZCNT instruction. +/// +/// \param __X +/// An unsigned 32-bit integer whose trailing zeros are to be counted. 
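/* Illustrative sketch (the struct and helper are made up): _mulx_u64 yields a
   full 64x64 -> 128-bit product without touching the flags, returning the low
   half and storing the high half through the pointer argument. Assumes x86-64
   with BMI2 enabled (e.g. -mbmi2). */
#include <immintrin.h>

struct u64x2 { unsigned long long lo, hi; };

static inline struct u64x2 mul_full_64(unsigned long long a,
                                       unsigned long long b) {
  struct u64x2 r;
  r.lo = _mulx_u64(a, b, &r.hi);
  return r;
}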
+/// \returns An unsigned 32-bit integer containing the number of trailing zero +/// bits in the operand. +/// \see { _mm_tzcnt_32 _tzcnt_u32 } +static __inline__ unsigned int __RELAXED_FN_ATTRS +__tzcnt_u32(unsigned int __X) +{ + return __builtin_ia32_tzcnt_u32(__X); +} + +/// Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c TZCNT instruction. +/// +/// \param __X +/// An unsigned 32-bit integer whose trailing zeros are to be counted. +/// \returns A 32-bit integer containing the number of trailing zero bits in +/// the operand. +/// \see { __tzcnt_u32 _tzcnt_u32 } +static __inline__ int __RELAXED_FN_ATTRS +_mm_tzcnt_32(unsigned int __X) +{ + return (int)__builtin_ia32_tzcnt_u32(__X); +} + +/// Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// \code +/// unsigned int _tzcnt_u32(unsigned int __X); +/// \endcode +/// +/// This intrinsic corresponds to the \c TZCNT instruction. +/// +/// \param __X +/// An unsigned 32-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 32-bit integer containing the number of trailing zero +/// bits in the operand. +/// \see { _mm_tzcnt_32 __tzcnt_u32 } +#define _tzcnt_u32 __tzcnt_u32 + +#ifdef __x86_64__ + +/// Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c TZCNT instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 64-bit integer containing the number of trailing zero +/// bits in the operand. +/// \see { _mm_tzcnt_64 _tzcnt_u64 } +static __inline__ unsigned long long __RELAXED_FN_ATTRS +__tzcnt_u64(unsigned long long __X) +{ + return __builtin_ia32_tzcnt_u64(__X); +} + +/// Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c TZCNT instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose trailing zeros are to be counted. +/// \returns An 64-bit integer containing the number of trailing zero bits in +/// the operand. +/// \see { __tzcnt_u64 _tzcnt_u64 } +static __inline__ long long __RELAXED_FN_ATTRS +_mm_tzcnt_64(unsigned long long __X) +{ + return (long long)__builtin_ia32_tzcnt_u64(__X); +} + +/// Counts the number of trailing zero bits in the operand. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _tzcnt_u64(unsigned long long __X); +/// \endcode +/// +/// This intrinsic corresponds to the \c TZCNT instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose trailing zeros are to be counted. +/// \returns An unsigned 64-bit integer containing the number of trailing zero +/// bits in the operand. +/// \see { _mm_tzcnt_64 __tzcnt_u64 +#define _tzcnt_u64 __tzcnt_u64 + +#endif /* __x86_64__ */ + +#undef __RELAXED_FN_ATTRS + +#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI__) + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi"))) + +/// Performs a bitwise AND of the second operand with the one's +/// complement of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ANDN instruction. +/// +/// \param __X +/// An unsigned integer containing one of the operands. +/// \param __Y +/// An unsigned integer containing one of the operands. 
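/* Illustrative sketch (hypothetical helper): TZCNT as a branch-free "index of
   the lowest set bit"; unlike BSF it is well defined for a zero input, where
   it returns the operand width. */
#include <immintrin.h>

static inline unsigned lowest_set_bit_index(unsigned x) {
  return _tzcnt_u32(x); /* 32 when x == 0, otherwise the bit index */
}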
+/// \returns An unsigned integer containing the bitwise AND of the second +/// operand with the one's complement of the first operand. +/// \see _andn_u32 +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__andn_u32(unsigned int __X, unsigned int __Y) +{ + return ~__X & __Y; +} + +/// Performs a bitwise AND of the second operand with the one's +/// complement of the first operand. +/// +/// \headerfile +/// +/// \code +/// unsigned int _andn_u32(unsigned int __X, unsigned int __Y); +/// \endcode +/// +/// This intrinsic corresponds to the \c ANDN instruction. +/// +/// \param __X +/// An unsigned integer containing one of the operands. +/// \param __Y +/// An unsigned integer containing one of the operands. +/// \returns An unsigned integer containing the bitwise AND of the second +/// operand with the one's complement of the first operand. +/// \see __andn_u32 +#define _andn_u32 __andn_u32 + +/* AMD-specified, double-leading-underscore version of BEXTR */ +/// Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BEXTR instruction. +/// +/// \param __X +/// An unsigned integer whose bits are to be extracted. +/// \param __Y +/// An unsigned integer used to specify which bits are extracted. Bits [7:0] +/// specify the index of the least significant bit. Bits [15:8] specify the +/// number of bits to be extracted. +/// \returns An unsigned integer whose least significant bits contain the +/// extracted bits. +/// \see _bextr_u32 +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__bextr_u32(unsigned int __X, unsigned int __Y) +{ + return __builtin_ia32_bextr_u32(__X, __Y); +} + +/* Intel-specified, single-leading-underscore version of BEXTR */ +/// Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BEXTR instruction. +/// +/// \param __X +/// An unsigned integer whose bits are to be extracted. +/// \param __Y +/// An unsigned integer used to specify the index of the least significant +/// bit for the bits to be extracted. Bits [7:0] specify the index. +/// \param __Z +/// An unsigned integer used to specify the number of bits to be extracted. +/// Bits [7:0] specify the number of bits. +/// \returns An unsigned integer whose least significant bits contain the +/// extracted bits. +/// \see __bextr_u32 +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z) +{ + return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8))); +} + +/* Intel-specified, single-leading-underscore version of BEXTR2 */ +/// Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BEXTR instruction. +/// +/// \param __X +/// An unsigned integer whose bits are to be extracted. +/// \param __Y +/// An unsigned integer used to specify which bits are extracted. Bits [7:0] +/// specify the index of the least significant bit. Bits [15:8] specify the +/// number of bits to be extracted. +/// \returns An unsigned integer whose least significant bits contain the +/// extracted bits. 
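/* Illustrative sketch (the field layout and helper are made up): the
   Intel-style _bextr_u32(value, start, length) pulls a bit field into the low
   bits in a single instruction. Assumes BMI1 is enabled (e.g. -mbmi). */
#include <immintrin.h>

static inline unsigned extract_opcode(unsigned instruction_word) {
  return _bextr_u32(instruction_word, 4, 7); /* bits [10:4] -> bits [6:0] */
}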
+/// \see __bextr_u32 +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_bextr2_u32(unsigned int __X, unsigned int __Y) { + return __builtin_ia32_bextr_u32(__X, __Y); +} + +/// Clears all bits in the source except for the least significant bit +/// containing a value of 1 and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BLSI instruction. +/// +/// \param __X +/// An unsigned integer whose bits are to be cleared. +/// \returns An unsigned integer containing the result of clearing the bits from +/// the source operand. +/// \see _blsi_u32 +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blsi_u32(unsigned int __X) +{ + return __X & -__X; +} + +/// Clears all bits in the source except for the least significant bit +/// containing a value of 1 and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned int _blsi_u32(unsigned int __X); +/// \endcode +/// +/// This intrinsic corresponds to the \c BLSI instruction. +/// +/// \param __X +/// An unsigned integer whose bits are to be cleared. +/// \returns An unsigned integer containing the result of clearing the bits from +/// the source operand. +/// \see __blsi_u32 +#define _blsi_u32 __blsi_u32 + +/// Creates a mask whose bits are set to 1, using bit 0 up to and +/// including the least significant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BLSMSK instruction. +/// +/// \param __X +/// An unsigned integer used to create the mask. +/// \returns An unsigned integer containing the newly created mask. +/// \see _blsmsk_u32 +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blsmsk_u32(unsigned int __X) +{ + return __X ^ (__X - 1); +} + +/// Creates a mask whose bits are set to 1, using bit 0 up to and +/// including the least significant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned int _blsmsk_u32(unsigned int __X); +/// \endcode +/// +/// This intrinsic corresponds to the \c BLSMSK instruction. +/// +/// \param __X +/// An unsigned integer used to create the mask. +/// \returns An unsigned integer containing the newly created mask. +/// \see __blsmsk_u32 +#define _blsmsk_u32 __blsmsk_u32 + +/// Clears the least significant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BLSR instruction. +/// +/// \param __X +/// An unsigned integer containing the operand to be cleared. +/// \returns An unsigned integer containing the result of clearing the source +/// operand. +/// \see _blsr_u32 +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blsr_u32(unsigned int __X) +{ + return __X & (__X - 1); +} + +/// Clears the least significant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned int _bls4_u32(unsigned int __X); +/// \endcode +/// +/// This intrinsic corresponds to the \c BLSR instruction. +/// +/// \param __X +/// An unsigned integer containing the operand to be cleared. +/// \returns An unsigned integer containing the result of clearing the source +/// operand. +/// \see __blsr_u32 +#define _blsr_u32 __blsr_u32 + +#ifdef __x86_64__ + +/// Performs a bitwise AND of the second operand with the one's +/// complement of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ANDN instruction. 
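/* Illustrative sketch (hypothetical callback): the classic set-bit walk,
   pairing _tzcnt_u32 (index of the lowest set bit) with __blsr_u32 (clear the
   lowest set bit) so each iteration retires exactly one bit. Assumes BMI1. */
#include <immintrin.h>

static inline void for_each_set_bit(unsigned mask, void (*visit)(unsigned)) {
  while (mask) {
    visit(_tzcnt_u32(mask));
    mask = __blsr_u32(mask);
  }
}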
+/// +/// \param __X +/// An unsigned 64-bit integer containing one of the operands. +/// \param __Y +/// An unsigned 64-bit integer containing one of the operands. +/// \returns An unsigned 64-bit integer containing the bitwise AND of the second +/// operand with the one's complement of the first operand. +/// \see _andn_u64 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__andn_u64 (unsigned long long __X, unsigned long long __Y) +{ + return ~__X & __Y; +} + +/// Performs a bitwise AND of the second operand with the one's +/// complement of the first operand. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _andn_u64(unsigned long long __X, +/// unsigned long long __Y); +/// \endcode +/// +/// This intrinsic corresponds to the \c ANDN instruction. +/// +/// \param __X +/// An unsigned 64-bit integer containing one of the operands. +/// \param __Y +/// An unsigned 64-bit integer containing one of the operands. +/// \returns An unsigned 64-bit integer containing the bitwise AND of the second +/// operand with the one's complement of the first operand. +/// \see __andn_u64 +#define _andn_u64 __andn_u64 + +/* AMD-specified, double-leading-underscore version of BEXTR */ +/// Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BEXTR instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose bits are to be extracted. +/// \param __Y +/// An unsigned 64-bit integer used to specify which bits are extracted. Bits +/// [7:0] specify the index of the least significant bit. Bits [15:8] specify +/// the number of bits to be extracted. +/// \returns An unsigned 64-bit integer whose least significant bits contain the +/// extracted bits. +/// \see _bextr_u64 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__bextr_u64(unsigned long long __X, unsigned long long __Y) +{ + return __builtin_ia32_bextr_u64(__X, __Y); +} + +/* Intel-specified, single-leading-underscore version of BEXTR */ +/// Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BEXTR instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose bits are to be extracted. +/// \param __Y +/// An unsigned integer used to specify the index of the least significant +/// bit for the bits to be extracted. Bits [7:0] specify the index. +/// \param __Z +/// An unsigned integer used to specify the number of bits to be extracted. +/// Bits [7:0] specify the number of bits. +/// \returns An unsigned 64-bit integer whose least significant bits contain the +/// extracted bits. +/// \see __bextr_u64 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z) +{ + return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8))); +} + +/* Intel-specified, single-leading-underscore version of BEXTR2 */ +/// Extracts the specified bits from the first operand and returns them +/// in the least significant bits of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BEXTR instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose bits are to be extracted. +/// \param __Y +/// An unsigned 64-bit integer used to specify which bits are extracted. Bits +/// [7:0] specify the index of the least significant bit. 
Bits [15:8] specify +/// the number of bits to be extracted. +/// \returns An unsigned 64-bit integer whose least significant bits contain the +/// extracted bits. +/// \see __bextr_u64 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_bextr2_u64(unsigned long long __X, unsigned long long __Y) { + return __builtin_ia32_bextr_u64(__X, __Y); +} + +/// Clears all bits in the source except for the least significant bit +/// containing a value of 1 and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BLSI instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose bits are to be cleared. +/// \returns An unsigned 64-bit integer containing the result of clearing the +/// bits from the source operand. +/// \see _blsi_u64 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blsi_u64(unsigned long long __X) +{ + return __X & -__X; +} + +/// Clears all bits in the source except for the least significant bit +/// containing a value of 1 and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _blsi_u64(unsigned long long __X); +/// \endcode +/// +/// This intrinsic corresponds to the \c BLSI instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose bits are to be cleared. +/// \returns An unsigned 64-bit integer containing the result of clearing the +/// bits from the source operand. +/// \see __blsi_u64 +#define _blsi_u64 __blsi_u64 + +/// Creates a mask whose bits are set to 1, using bit 0 up to and +/// including the least significant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BLSMSK instruction. +/// +/// \param __X +/// An unsigned 64-bit integer used to create the mask. +/// \returns An unsigned 64-bit integer containing the newly created mask. +/// \see _blsmsk_u64 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blsmsk_u64(unsigned long long __X) +{ + return __X ^ (__X - 1); +} + +/// Creates a mask whose bits are set to 1, using bit 0 up to and +/// including the least significant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _blsmsk_u64(unsigned long long __X); +/// \endcode +/// +/// This intrinsic corresponds to the \c BLSMSK instruction. +/// +/// \param __X +/// An unsigned 64-bit integer used to create the mask. +/// \returns An unsigned 64-bit integer containing the newly created mask. +/// \see __blsmsk_u64 +#define _blsmsk_u64 __blsmsk_u64 + +/// Clears the least significant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BLSR instruction. +/// +/// \param __X +/// An unsigned 64-bit integer containing the operand to be cleared. +/// \returns An unsigned 64-bit integer containing the result of clearing the +/// source operand. +/// \see _blsr_u64 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blsr_u64(unsigned long long __X) +{ + return __X & (__X - 1); +} + +/// Clears the least significant bit that is set to 1 in the source +/// operand and returns the result. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _blsr_u64(unsigned long long __X); +/// \endcode +/// +/// This intrinsic corresponds to the \c BLSR instruction. +/// +/// \param __X +/// An unsigned 64-bit integer containing the operand to be cleared. 
+/// \returns An unsigned 64-bit integer containing the result of clearing the +/// source operand. +/// \see __blsr_u64 +#define _blsr_u64 __blsr_u64 + +#endif /* __x86_64__ */ + +#undef __DEFAULT_FN_ATTRS + +#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__BMI__) */ + +#endif /* __BMIINTRIN_H */ diff --git a/third_party/intel/clang/cetintrin.h b/third_party/intel/clang/cetintrin.h new file mode 100644 index 000000000..a68df5b1d --- /dev/null +++ b/third_party/intel/clang/cetintrin.h @@ -0,0 +1,115 @@ +/*===---- cetintrin.h - CET intrinsic --------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __CETINTRIN_H +#define __CETINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("shstk"))) + +static __inline__ void __DEFAULT_FN_ATTRS _incsspd(int __a) { + __builtin_ia32_incsspd((unsigned int)__a); +} + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS _incsspq(unsigned long long __a) { + __builtin_ia32_incsspq(__a); +} +#endif /* __x86_64__ */ + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) { + __builtin_ia32_incsspq(__a); +} +#else /* __x86_64__ */ +static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) { + __builtin_ia32_incsspd(__a); +} +#endif /* __x86_64__ */ + +static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd(unsigned int __a) { + return __builtin_ia32_rdsspd(__a); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32(void) { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wuninitialized" + unsigned int t; + return __builtin_ia32_rdsspd(t); +#pragma clang diagnostic pop +} + +#ifdef __x86_64__ +static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) { + return __builtin_ia32_rdsspq(__a); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64(void) { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wuninitialized" + unsigned long long t; + return __builtin_ia32_rdsspq(t); +#pragma clang diagnostic pop +} +#endif /* __x86_64__ */ + +#ifdef __x86_64__ +static __inline__ unsigned long long __DEFAULT_FN_ATTRS _get_ssp(void) { + return __builtin_ia32_rdsspq(0); +} +#else /* __x86_64__ */ +static __inline__ unsigned int __DEFAULT_FN_ATTRS _get_ssp(void) { + return __builtin_ia32_rdsspd(0); +} +#endif /* __x86_64__ */ + +static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp(void) { + __builtin_ia32_saveprevssp(); +} + +static __inline__ void __DEFAULT_FN_ATTRS _rstorssp(void * __p) { + __builtin_ia32_rstorssp(__p); +} + +static __inline__ void __DEFAULT_FN_ATTRS _wrssd(unsigned int __a, void * __p) { + __builtin_ia32_wrssd(__a, __p); +} + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS _wrssq(unsigned long long __a, void * __p) { + __builtin_ia32_wrssq(__a, __p); +} +#endif /* __x86_64__ */ + +static __inline__ void __DEFAULT_FN_ATTRS _wrussd(unsigned int __a, void * __p) { + __builtin_ia32_wrussd(__a, __p); +} + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS _wrussq(unsigned long long 
__a, void * __p) { + __builtin_ia32_wrussq(__a, __p); +} +#endif /* __x86_64__ */ + +static __inline__ void __DEFAULT_FN_ATTRS _setssbsy(void) { + __builtin_ia32_setssbsy(); +} + +static __inline__ void __DEFAULT_FN_ATTRS _clrssbsy(void * __p) { + __builtin_ia32_clrssbsy(__p); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __CETINTRIN_H */ diff --git a/third_party/intel/clang/cldemoteintrin.h b/third_party/intel/clang/cldemoteintrin.h new file mode 100644 index 000000000..cfb951c1b --- /dev/null +++ b/third_party/intel/clang/cldemoteintrin.h @@ -0,0 +1,36 @@ +/*===---- cldemoteintrin.h - CLDEMOTE intrinsic ----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __CLDEMOTEINTRIN_H +#define __CLDEMOTEINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("cldemote"))) + +/// Hint to hardware that the cache line that contains \p __P should be demoted +/// from the cache closest to the processor core to a level more distant from +/// the processor core. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CLDEMOTE instruction. +static __inline__ void __DEFAULT_FN_ATTRS +_cldemote(const void * __P) { + __builtin_ia32_cldemote(__P); +} + +#define _mm_cldemote(p) _cldemote(p) +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/clflushoptintrin.h b/third_party/intel/clang/clflushoptintrin.h new file mode 100644 index 000000000..ae0a0244c --- /dev/null +++ b/third_party/intel/clang/clflushoptintrin.h @@ -0,0 +1,36 @@ +/*===---- clflushoptintrin.h - CLFLUSHOPT intrinsic ------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __CLFLUSHOPTINTRIN_H +#define __CLFLUSHOPTINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clflushopt"))) + +/// Invalidates all levels of the cache hierarchy and flushes modified data to +/// memory for the cache line specified by the address \a __m. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c CLFLUSHOPT instruction. +/// +/// \param __m +/// An address within the cache line to flush and invalidate. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_clflushopt(void const * __m) { + __builtin_ia32_clflushopt(__m); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/clwbintrin.h b/third_party/intel/clang/clwbintrin.h new file mode 100644 index 000000000..3360d203f --- /dev/null +++ b/third_party/intel/clang/clwbintrin.h @@ -0,0 +1,38 @@ +/*===---- clwbintrin.h - CLWB intrinsic ------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+ * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __CLWBINTRIN_H +#define __CLWBINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clwb"))) + +/// Writes back to memory the cache line (if modified) that contains the +/// linear address specified in \a __p from any level of the cache hierarchy in +/// the cache coherence domain +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CLWB instruction. +/// +/// \param __p +/// A pointer to the memory location used to identify the cache line to be +/// written back. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_clwb(void const *__p) { + __builtin_ia32_clwb(__p); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/clzerointrin.h b/third_party/intel/clang/clzerointrin.h new file mode 100644 index 000000000..acccfe94f --- /dev/null +++ b/third_party/intel/clang/clzerointrin.h @@ -0,0 +1,38 @@ +/*===----------------------- clzerointrin.h - CLZERO ----------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __X86INTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __CLZEROINTRIN_H +#define __CLZEROINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("clzero"))) + +/// Zeroes out the cache line for the address \a __line. This uses a +/// non-temporal store. Calling \c _mm_sfence() afterward might be needed +/// to enforce ordering. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c CLZERO instruction. +/// +/// \param __line +/// An address within the cache line to zero out. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_clzero (void * __line) +{ + __builtin_ia32_clzero ((void *)__line); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __CLZEROINTRIN_H */ diff --git a/third_party/intel/clang/cmpccxaddintrin.h b/third_party/intel/clang/cmpccxaddintrin.h new file mode 100644 index 000000000..695749899 --- /dev/null +++ b/third_party/intel/clang/cmpccxaddintrin.h @@ -0,0 +1,70 @@ +/*===--------------- cmpccxaddintrin.h - CMPCCXADD intrinsics--------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __X86GPRINTRIN_H +#error \ + "Never use directly; include instead." +#endif // __X86GPRINTRIN_H + +#ifndef __CMPCCXADDINTRIN_H +#define __CMPCCXADDINTRIN_H +#ifdef __x86_64__ + +typedef enum { + _CMPCCX_O, /* Overflow. */ + _CMPCCX_NO, /* No overflow. */ + _CMPCCX_B, /* Below. */ + _CMPCCX_NB, /* Not below. */ + _CMPCCX_Z, /* Zero. */ + _CMPCCX_NZ, /* Not zero. */ + _CMPCCX_BE, /* Below or equal. */ + _CMPCCX_NBE, /* Neither below nor equal. */ + _CMPCCX_S, /* Sign. 
*/ + _CMPCCX_NS, /* No sign. */ + _CMPCCX_P, /* Parity. */ + _CMPCCX_NP, /* No parity. */ + _CMPCCX_L, /* Less. */ + _CMPCCX_NL, /* Not less. */ + _CMPCCX_LE, /* Less or equal. */ + _CMPCCX_NLE, /* Neither less nor equal. */ +} _CMPCCX_ENUM; + +/// Compares the value from the memory __A with the value of __B. If the +/// specified condition __D is met, then add the third operand __C to the +/// __A and write it into __A, else the value of __A is unchanged. The return +/// value is the original value of __A. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c CMPCCXADD instructions. +/// +/// \param __A +/// __A pointer specifying the memory address. +/// +/// \param __B +/// A integer operand. +/// +/// \param __C +/// A integer operand. +/// +/// \param __D +/// The specified condition. +/// +/// \returns a integer which is the original value of first operand. + +#define _cmpccxadd_epi32(__A, __B, __C, __D) \ + ((int)(__builtin_ia32_cmpccxadd32((void *)(__A), (int)(__B), (int)(__C), \ + (int)(__D)))) + +#define _cmpccxadd_epi64(__A, __B, __C, __D) \ + ((long long)(__builtin_ia32_cmpccxadd64((void *)(__A), (long long)(__B), \ + (long long)(__C), (int)(__D)))) + +#endif // __x86_64__ +#endif // __CMPCCXADDINTRIN_H diff --git a/third_party/intel/clang/crc32intrin.h b/third_party/intel/clang/crc32intrin.h new file mode 100644 index 000000000..a0bd99d1b --- /dev/null +++ b/third_party/intel/clang/crc32intrin.h @@ -0,0 +1,100 @@ +/*===---- crc32intrin.h - SSE4.2 Accumulate CRC32 intrinsics ---------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CRC32INTRIN_H +#define __CRC32INTRIN_H + +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("crc32"))) + +/// Adds the unsigned integer operand to the CRC-32C checksum of the +/// unsigned char operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CRC32B instruction. +/// +/// \param __C +/// An unsigned integer operand to add to the CRC-32C checksum of operand +/// \a __D. +/// \param __D +/// An unsigned 8-bit integer operand used to compute the CRC-32C checksum. +/// \returns The result of adding operand \a __C to the CRC-32C checksum of +/// operand \a __D. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm_crc32_u8(unsigned int __C, unsigned char __D) +{ + return __builtin_ia32_crc32qi(__C, __D); +} + +/// Adds the unsigned integer operand to the CRC-32C checksum of the +/// unsigned short operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CRC32W instruction. +/// +/// \param __C +/// An unsigned integer operand to add to the CRC-32C checksum of operand +/// \a __D. +/// \param __D +/// An unsigned 16-bit integer operand used to compute the CRC-32C checksum. +/// \returns The result of adding operand \a __C to the CRC-32C checksum of +/// operand \a __D. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm_crc32_u16(unsigned int __C, unsigned short __D) +{ + return __builtin_ia32_crc32hi(__C, __D); +} + +/// Adds the first unsigned integer operand to the CRC-32C checksum of +/// the second unsigned integer operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CRC32L instruction. 
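/* Illustrative sketch (hypothetical helper): folding a buffer into a CRC-32C
   value one byte at a time with _mm_crc32_u8. The caller picks the initial
   value and any final inversion; the wider _mm_crc32_u32/_mm_crc32_u64 steps
   are the usual speedup. Assumes SSE4.2/CRC32 is enabled (e.g. -msse4.2). */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>

static inline uint32_t crc32c_update(uint32_t crc, const void *data, size_t n) {
  const uint8_t *p = (const uint8_t *)data;
  for (size_t i = 0; i < n; i++)
    crc = _mm_crc32_u8(crc, p[i]);
  return crc;
}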
+/// +/// \param __C +/// An unsigned integer operand to add to the CRC-32C checksum of operand +/// \a __D. +/// \param __D +/// An unsigned 32-bit integer operand used to compute the CRC-32C checksum. +/// \returns The result of adding operand \a __C to the CRC-32C checksum of +/// operand \a __D. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm_crc32_u32(unsigned int __C, unsigned int __D) +{ + return __builtin_ia32_crc32si(__C, __D); +} + +#ifdef __x86_64__ +/// Adds the unsigned integer operand to the CRC-32C checksum of the +/// unsigned 64-bit integer operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CRC32Q instruction. +/// +/// \param __C +/// An unsigned integer operand to add to the CRC-32C checksum of operand +/// \a __D. +/// \param __D +/// An unsigned 64-bit integer operand used to compute the CRC-32C checksum. +/// \returns The result of adding operand \a __C to the CRC-32C checksum of +/// operand \a __D. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_mm_crc32_u64(unsigned long long __C, unsigned long long __D) +{ + return __builtin_ia32_crc32di(__C, __D); +} +#endif /* __x86_64__ */ + +#undef __DEFAULT_FN_ATTRS + +#endif /* __CRC32INTRIN_H */ diff --git a/third_party/intel/clang/emmintrin.h b/third_party/intel/clang/emmintrin.h new file mode 100644 index 000000000..16ac07eaa --- /dev/null +++ b/third_party/intel/clang/emmintrin.h @@ -0,0 +1,4906 @@ +/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __EMMINTRIN_H +#define __EMMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +#include "xmmintrin.h" + +typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16))); +typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16))); + +typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1))); +typedef long long __m128i_u + __attribute__((__vector_size__(16), __aligned__(1))); + +/* Type defines. */ +typedef double __v2df __attribute__((__vector_size__(16))); +typedef long long __v2di __attribute__((__vector_size__(16))); +typedef short __v8hi __attribute__((__vector_size__(16))); +typedef char __v16qi __attribute__((__vector_size__(16))); + +/* Unsigned types */ +typedef unsigned long long __v2du __attribute__((__vector_size__(16))); +typedef unsigned short __v8hu __attribute__((__vector_size__(16))); +typedef unsigned char __v16qu __attribute__((__vector_size__(16))); + +/* We need an explicitly signed variant for char. Note that this shouldn't + * appear in the interface though. */ +typedef signed char __v16qs __attribute__((__vector_size__(16))); + +#ifdef __SSE2__ +/* Both _Float16 and __bf16 require SSE2 being enabled. 
*/ +typedef _Float16 __v8hf __attribute__((__vector_size__(16), __aligned__(16))); +typedef _Float16 __m128h __attribute__((__vector_size__(16), __aligned__(16))); +typedef _Float16 __m128h_u __attribute__((__vector_size__(16), __aligned__(1))); + +typedef __bf16 __v8bf __attribute__((__vector_size__(16), __aligned__(16))); +typedef __bf16 __m128bh __attribute__((__vector_size__(16), __aligned__(16))); +#endif + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("sse2,no-evex512"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS_MMX \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("mmx,sse2,no-evex512"), __min_vector_width__(64))) + +/// Adds lower double-precision values in both operands and returns the +/// sum in the lower 64 bits of the result. The upper 64 bits of the result +/// are copied from the upper double-precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSD / ADDSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// sum of the lower 64 bits of both operands. The upper 64 bits are copied +/// from the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_sd(__m128d __a, + __m128d __b) { + __a[0] += __b[0]; + return __a; +} + +/// Adds two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPD / ADDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the sums of both +/// operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a, + __m128d __b) { + return (__m128d)((__v2df)__a + (__v2df)__b); +} + +/// Subtracts the lower double-precision value of the second operand +/// from the lower double-precision value of the first operand and returns +/// the difference in the lower 64 bits of the result. The upper 64 bits of +/// the result are copied from the upper double-precision value of the first +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBSD / SUBSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the minuend. +/// \param __b +/// A 128-bit vector of [2 x double] containing the subtrahend. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// difference of the lower 64 bits of both operands. The upper 64 bits are +/// copied from the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_sd(__m128d __a, + __m128d __b) { + __a[0] -= __b[0]; + return __a; +} + +/// Subtracts two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPD / SUBPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the minuend. +/// \param __b +/// A 128-bit vector of [2 x double] containing the subtrahend. +/// \returns A 128-bit vector of [2 x double] containing the differences between +/// both operands. 
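/* Illustrative sketch (hypothetical helper): the packed [2 x double] forms
   above operate lane-wise, so two elements of an axpy update fit in one
   register. Assumes SSE2, which every x86-64 target provides. */
#include <emmintrin.h>

static inline void axpy2(double out[2], double a, const double x[2],
                         const double y[2]) {
  __m128d va = _mm_set1_pd(a); /* broadcast the scalar to both lanes */
  __m128d r = _mm_add_pd(_mm_mul_pd(va, _mm_loadu_pd(x)), _mm_loadu_pd(y));
  _mm_storeu_pd(out, r);       /* out[i] = a*x[i] + y[i] */
}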
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a, + __m128d __b) { + return (__m128d)((__v2df)__a - (__v2df)__b); +} + +/// Multiplies lower double-precision values in both operands and returns +/// the product in the lower 64 bits of the result. The upper 64 bits of the +/// result are copied from the upper double-precision value of the first +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULSD / MULSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// product of the lower 64 bits of both operands. The upper 64 bits are +/// copied from the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_sd(__m128d __a, + __m128d __b) { + __a[0] *= __b[0]; + return __a; +} + +/// Multiplies two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPD / MULPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \returns A 128-bit vector of [2 x double] containing the products of both +/// operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a, + __m128d __b) { + return (__m128d)((__v2df)__a * (__v2df)__b); +} + +/// Divides the lower double-precision value of the first operand by the +/// lower double-precision value of the second operand and returns the +/// quotient in the lower 64 bits of the result. The upper 64 bits of the +/// result are copied from the upper double-precision value of the first +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVSD / DIVSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the dividend. +/// \param __b +/// A 128-bit vector of [2 x double] containing divisor. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// quotient of the lower 64 bits of both operands. The upper 64 bits are +/// copied from the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_sd(__m128d __a, + __m128d __b) { + __a[0] /= __b[0]; + return __a; +} + +/// Performs an element-by-element division of two 128-bit vectors of +/// [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPD / DIVPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the dividend. +/// \param __b +/// A 128-bit vector of [2 x double] containing the divisor. +/// \returns A 128-bit vector of [2 x double] containing the quotients of both +/// operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a, + __m128d __b) { + return (__m128d)((__v2df)__a / (__v2df)__b); +} + +/// Calculates the square root of the lower double-precision value of +/// the second operand and returns it in the lower 64 bits of the result. +/// The upper 64 bits of the result are copied from the upper +/// double-precision value of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTSD / SQRTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. 
The +/// upper 64 bits of this operand are copied to the upper 64 bits of the +/// result. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// square root is calculated using the lower 64 bits of this operand. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// square root of the lower 64 bits of operand \a __b, and whose upper 64 +/// bits are copied from the upper 64 bits of operand \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a, + __m128d __b) { + __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b); + return __extension__(__m128d){__c[0], __a[1]}; +} + +/// Calculates the square root of the each of two values stored in a +/// 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPD / SQRTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [2 x double] containing the square roots of the +/// values in the operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) { + return __builtin_ia32_sqrtpd((__v2df)__a); +} + +/// Compares lower 64-bit double-precision values of both operands, and +/// returns the lesser of the pair of values in the lower 64-bits of the +/// result. The upper 64 bits of the result are copied from the upper +/// double-precision value of the first operand. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINSD / MINSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// minimum value between both operands. The upper 64 bits are copied from +/// the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b); +} + +/// Performs element-by-element comparison of the two 128-bit vectors of +/// [2 x double] and returns a vector containing the lesser of each pair of +/// values. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPD / MINPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \returns A 128-bit vector of [2 x double] containing the minimum values +/// between both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, + __m128d __b) { + return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b); +} + +/// Compares lower 64-bit double-precision values of both operands, and +/// returns the greater of the pair of values in the lower 64-bits of the +/// result. The upper 64 bits of the result are copied from the upper +/// double-precision value of the first operand. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXSD / MAXSD instruction. 
+/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. The +/// lower 64 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// maximum value between both operands. The upper 64 bits are copied from +/// the upper 64 bits of the first source operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b); +} + +/// Performs element-by-element comparison of the two 128-bit vectors of +/// [2 x double] and returns a vector containing the greater of each pair +/// of values. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPD / MAXPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the operands. +/// \returns A 128-bit vector of [2 x double] containing the maximum values +/// between both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a, + __m128d __b) { + return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b); +} + +/// Performs a bitwise AND of two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAND / PAND instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the +/// values between both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_and_pd(__m128d __a, + __m128d __b) { + return (__m128d)((__v2du)__a & (__v2du)__b); +} + +/// Performs a bitwise AND of two 128-bit vectors of [2 x double], using +/// the one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPANDN / PANDN instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the left source operand. The +/// one's complement of this value is used in the bitwise AND. +/// \param __b +/// A 128-bit vector of [2 x double] containing the right source operand. +/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the +/// values in the second operand and the one's complement of the first +/// operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_andnot_pd(__m128d __a, + __m128d __b) { + return (__m128d)(~(__v2du)__a & (__v2du)__b); +} + +/// Performs a bitwise OR of two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPOR / POR instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the +/// values between both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_or_pd(__m128d __a, + __m128d __b) { + return (__m128d)((__v2du)__a | (__v2du)__b); +} + +/// Performs a bitwise XOR of two 128-bit vectors of [2 x double]. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPXOR / PXOR instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the +/// values between both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a, + __m128d __b) { + return (__m128d)((__v2du)__a ^ (__v2du)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQPD / CMPEQPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are less than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPD / CMPLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are less than or equal to those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPD / CMPLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPD / CMPLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. 
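/*
 * Illustrative sketch (editorial, not part of the vendored header): the packed
 * compares above return per-lane masks of all ones or all zeros, which combine
 * with the bitwise intrinsics into a branchless select. The helper name is
 * hypothetical.
 */
#include <emmintrin.h>

static __m128d select_smaller(__m128d a, __m128d b) {
  __m128d mask = _mm_cmplt_pd(a, b);         /* lane = all ones where a < b */
  return _mm_or_pd(_mm_and_pd(mask, a),      /* keep a where the mask is set */
                   _mm_andnot_pd(mask, b));  /* keep b elsewhere (incl. NaN lanes),
                                                much like the _mm_min_pd note above */
}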
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are greater than or equal to those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPD / CMPLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are ordered with respect to those in the second operand. +/// +/// A pair of double-precision values are ordered with respect to each +/// other if neither value is a NaN. Each comparison returns 0x0 for false, +/// 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDPD / CMPORDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are unordered with respect to those in the second operand. +/// +/// A pair of double-precision values are unordered with respect to each +/// other if one or both values are NaN. Each comparison returns 0x0 for +/// false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDPD / CMPUNORDPD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are unequal to those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQPD / CMPNEQPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. 
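/*
 * Illustrative sketch (editorial, not part of the vendored header): comparing a
 * vector against itself with _mm_cmpunord_pd yields an all-ones lane exactly
 * where that lane holds a NaN, a common vectorized isnan() idiom. Note also how
 * the definitions below implement "greater than" by swapping the operands of
 * the "less than" builtin. The helper name is hypothetical.
 */
#include <emmintrin.h>

static __m128d nan_mask(__m128d x) {
  return _mm_cmpunord_pd(x, x);  /* lane = 0xFFFFFFFFFFFFFFFF iff lane is NaN */
}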
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not less than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPD / CMPNLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not less than or equal to those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPD / CMPNLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPD / CMPNLTPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a); +} + +/// Compares each of the corresponding double-precision values of the +/// 128-bit vectors of [2 x double] to determine if the values in the first +/// operand are not greater than or equal to those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPD / CMPNLEPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \param __b +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector containing the comparison results. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] for equality. 
+/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQSD / CMPEQSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than the corresponding value in +/// the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSD / CMPLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESD / CMPLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than the corresponding value +/// in the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSD / CMPLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a, + __m128d __b) { + __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a); + return __extension__(__m128d){__c[0], __a[1]}; +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESD / CMPLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a, + __m128d __b) { + __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a); + return __extension__(__m128d){__c[0], __a[1]}; +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is ordered with respect to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair +/// of double-precision values are ordered with respect to each other if +/// neither value is a NaN. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDSD / CMPORDSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unordered with respect to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. 
A pair +/// of double-precision values are unordered with respect to each other if +/// one or both values are NaN. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDSD / CMPUNORDSD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unequal to the corresponding value in +/// the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQSD / CMPNEQSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not less than the corresponding +/// value in the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSD / CMPNLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not less than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESD / CMPNLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a, + __m128d __b) { + return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not greater than the corresponding +/// value in the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSD / CMPNLTSD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a, + __m128d __b) { + __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a); + return __extension__(__m128d){__c[0], __a[1]}; +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is not greater than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESD / CMPNLESD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns A 128-bit vector. The lower 64 bits contains the comparison +/// results. The upper 64 bits are copied from the upper 64 bits of \a __a. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, + __m128d __b) { + __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a); + return __extension__(__m128d){__c[0], __a[1]}; +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] for equality. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. 
The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than the corresponding value in +/// the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than the corresponding value +/// in the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. 
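/*
 * Illustrative sketch (editorial, not part of the vendored header): the
 * COMISD-based intrinsics compare only the low lanes and return a plain int,
 * so they drop straight into ordinary control flow. The helper name is
 * hypothetical.
 */
#include <emmintrin.h>

static int low_lane_is_less(__m128d a, __m128d b) {
  return _mm_comilt_sd(a, b);  /* 1 if a[0] < b[0], 0 otherwise (0 on NaN) */
}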
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unequal to the corresponding value in +/// the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISD / COMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] for equality. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than the corresponding value in +/// the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. 
The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is less than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than the corresponding value +/// in the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is greater than or equal to the +/// corresponding value in the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison results. 
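/*
 * Illustrative sketch (editorial, not part of the vendored header): per the
 * descriptions above, a NaN operand makes the UCOMISD "equal" predicate return
 * 0, and a NaN is never equal to itself, so negating the result flags a NaN in
 * the low lane. The helper name is hypothetical.
 */
#include <emmintrin.h>

static int low_lane_is_nan(__m128d x) {
  return !_mm_ucomieq_sd(x, x);  /* 1 iff x[0] is NaN */
}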
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b); +} + +/// Compares the lower double-precision floating-point values in each of +/// the two 128-bit floating-point vectors of [2 x double] to determine if +/// the value in the first parameter is unequal to the corresponding value in +/// the second parameter. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISD / UCOMISD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __b. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision value is +/// compared to the lower double-precision value of \a __a. +/// \returns An integer containing the comparison result. +static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a, + __m128d __b) { + return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b); +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two single-precision floating-point +/// values, returned in the lower 64 bits of a 128-bit vector of [4 x float]. +/// The upper 64 bits of the result vector are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2PS / CVTPD2PS instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// converted values. The upper 64 bits are set to zero. +static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a) { + return __builtin_ia32_cvtpd2ps((__v2df)__a); +} + +/// Converts the lower two single-precision floating-point elements of a +/// 128-bit vector of [4 x float] into two double-precision floating-point +/// values, returned in a 128-bit vector of [2 x double]. The upper two +/// elements of the input vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2PD / CVTPS2PD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower two single-precision +/// floating-point elements are converted to double-precision values. The +/// upper two elements are unused. +/// \returns A 128-bit vector of [2 x double] containing the converted values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a) { + return (__m128d) __builtin_convertvector( + __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df); +} + +/// Converts the lower two integer elements of a 128-bit vector of +/// [4 x i32] into two double-precision floating-point values, returned in a +/// 128-bit vector of [2 x double]. +/// +/// The upper two elements of the input vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PD / CVTDQ2PD instruction. +/// +/// \param __a +/// A 128-bit integer vector of [4 x i32]. The lower two integer elements are +/// converted to double-precision values. +/// +/// The upper two elements are unused. +/// \returns A 128-bit vector of [2 x double] containing the converted values. 
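/*
 * Illustrative sketch (editorial, not part of the vendored header): narrowing
 * two doubles to floats and widening them back with the conversion intrinsics
 * described above. The helper name is hypothetical.
 */
#include <emmintrin.h>

static __m128d round_trip_pd(__m128d d) {
  __m128 f = _mm_cvtpd_ps(d);  /* two floats in the low 64 bits, upper 64 bits zeroed */
  return _mm_cvtps_pd(f);      /* back to [2 x double]; precision may be lost */
}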
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) { + return (__m128d) __builtin_convertvector( + __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df); +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper +/// 64 bits of the result vector are set to zero. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPD2DQ / CVTPD2DQ instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the +/// converted values. The upper 64 bits are set to zero. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) { + return __builtin_ia32_cvtpd2dq((__v2df)__a); +} + +/// Converts the low-order element of a 128-bit vector of [2 x double] +/// into a 32-bit signed integer value. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 32-bit signed integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a) { + return __builtin_ia32_cvtsd2si((__v2df)__a); +} + +/// Converts the lower double-precision floating-point element of a +/// 128-bit vector of [2 x double], in the second parameter, into a +/// single-precision floating-point value, returned in the lower 32 bits of a +/// 128-bit vector of [4 x float]. The upper 96 bits of the result vector are +/// copied from the upper 96 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSD2SS / CVTSD2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The upper 96 bits of this parameter are +/// copied to the upper 96 bits of the result. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower double-precision +/// floating-point element is used in the conversion. +/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the +/// converted value from the second parameter. The upper 96 bits are copied +/// from the upper 96 bits of the first parameter. +static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a, + __m128d __b) { + return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b); +} + +/// Converts a 32-bit signed integer value, in the second parameter, into +/// a double-precision floating-point value, returned in the lower 64 bits of +/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector +/// are copied from the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SD / CVTSI2SD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are +/// copied to the upper 64 bits of the result. +/// \param __b +/// A 32-bit signed integer containing the value to be converted. +/// \returns A 128-bit vector of [2 x double]. 
The lower 64 bits contain the +/// converted value from the second parameter. The upper 64 bits are copied +/// from the upper 64 bits of the first parameter. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi32_sd(__m128d __a, + int __b) { + __a[0] = __b; + return __a; +} + +/// Converts the lower single-precision floating-point element of a +/// 128-bit vector of [4 x float], in the second parameter, into a +/// double-precision floating-point value, returned in the lower 64 bits of +/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector +/// are copied from the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SD / CVTSS2SD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are +/// copied to the upper 64 bits of the result. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower single-precision +/// floating-point element is used in the conversion. +/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the +/// converted value from the second parameter. The upper 64 bits are copied +/// from the upper 64 bits of the first parameter. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a, + __m128 __b) { + __a[0] = __b[0]; + return __a; +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed truncated (rounded +/// toward zero) 32-bit integer values, returned in the lower 64 bits +/// of a 128-bit vector of [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPD2DQ / CVTTPD2DQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the +/// converted values. The upper 64 bits are set to zero. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) { + return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a); +} + +/// Converts the low-order element of a [2 x double] vector into a 32-bit +/// signed truncated (rounded toward zero) integer value. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSD2SI / CVTTSD2SI +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 32-bit signed integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) { + return __builtin_ia32_cvttsd2si((__v2df)__a); +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed 32-bit integer values, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPD2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 64-bit vector of [2 x i32] containing the converted values. 
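/*
 * Illustrative sketch (editorial, not part of the vendored header): the
 * difference between the rounding and the truncating scalar conversions
 * documented above. The helper name is hypothetical.
 */
#include <emmintrin.h>

static void convert_examples(void) {
  __m128d x = _mm_set_sd(2.7);
  int rounded = _mm_cvtsd_si32(x);     /* 3 under the default round-to-nearest mode */
  int truncated = _mm_cvttsd_si32(x);  /* 2: always rounds toward zero */
  (void)rounded;
  (void)truncated;
}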
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a) { + return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a); +} + +/// Converts the two double-precision floating-point elements of a +/// 128-bit vector of [2 x double] into two signed truncated (rounded toward +/// zero) 32-bit integer values, returned in a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTTPD2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. +/// \returns A 64-bit vector of [2 x i32] containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvttpd_pi32(__m128d __a) { + return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a); +} + +/// Converts the two signed 32-bit integer elements of a 64-bit vector of +/// [2 x i32] into two double-precision floating-point values, returned in a +/// 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32]. +/// \returns A 128-bit vector of [2 x double] containing the converted values. +static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX _mm_cvtpi32_pd(__m64 __a) { + return __builtin_ia32_cvtpi2pd((__v2si)__a); +} + +/// Returns the low-order element of a 128-bit vector of [2 x double] as +/// a double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are returned. +/// \returns A double-precision floating-point value copied from the lower 64 +/// bits of \a __a. +static __inline__ double __DEFAULT_FN_ATTRS _mm_cvtsd_f64(__m128d __a) { + return __a[0]; +} + +/// Loads a 128-bit floating-point vector of [2 x double] from an aligned +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD / MOVAPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 16-byte aligned. +/// \returns A 128-bit vector of [2 x double] containing the loaded values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp) { + return *(const __m128d *)__dp; +} + +/// Loads a double-precision floating-point value from a specified memory +/// location and duplicates it to both vector elements of a 128-bit vector of +/// [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP / MOVDDUP instruction. +/// +/// \param __dp +/// A pointer to a memory location containing a double-precision value. +/// \returns A 128-bit vector of [2 x double] containing the loaded and +/// duplicated values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp) { + struct __mm_load1_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + double __u = ((const struct __mm_load1_pd_struct *)__dp)->__u; + return __extension__(__m128d){__u, __u}; +} + +#define _mm_load_pd1(dp) _mm_load1_pd(dp) + +/// Loads two double-precision values, in reverse order, from an aligned +/// memory location into a 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD / MOVAPD instruction + +/// needed shuffling instructions. 
In AVX mode, the shuffling may be combined +/// with the \c VMOVAPD, resulting in only a \c VPERMILPD instruction. +/// +/// \param __dp +/// A 16-byte aligned pointer to an array of double-precision values to be +/// loaded in reverse order. +/// \returns A 128-bit vector of [2 x double] containing the reversed loaded +/// values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp) { + __m128d __u = *(const __m128d *)__dp; + return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0); +} + +/// Loads a 128-bit floating-point vector of [2 x double] from an +/// unaligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD / MOVUPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [2 x double] containing the loaded values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp) { + struct __loadu_pd { + __m128d_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_pd *)__dp)->__v; +} + +/// Loads a 64-bit integer value to the low element of a 128-bit integer +/// vector and clears the upper element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A pointer to a 64-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [2 x i64] containing the loaded value. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a) { + struct __loadu_si64 { + long long __v; + } __attribute__((__packed__, __may_alias__)); + long long __u = ((const struct __loadu_si64 *)__a)->__v; + return __extension__(__m128i)(__v2di){__u, 0LL}; +} + +/// Loads a 32-bit integer value to the low element of a 128-bit integer +/// vector and clears the upper element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __a +/// A pointer to a 32-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [4 x i32] containing the loaded value. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a) { + struct __loadu_si32 { + int __v; + } __attribute__((__packed__, __may_alias__)); + int __u = ((const struct __loadu_si32 *)__a)->__v; + return __extension__(__m128i)(__v4si){__u, 0, 0, 0}; +} + +/// Loads a 16-bit integer value to the low element of a 128-bit integer +/// vector and clears the upper element. +/// +/// \headerfile +/// +/// This intrinsic does not correspond to a specific instruction. +/// +/// \param __a +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [8 x i16] containing the loaded value. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a) { + struct __loadu_si16 { + short __v; + } __attribute__((__packed__, __may_alias__)); + short __u = ((const struct __loadu_si16 *)__a)->__v; + return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0}; +} + +/// Loads a 64-bit double-precision value to the low element of a +/// 128-bit integer vector and clears the upper element. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSD / MOVSD instruction. 
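+/* Editor's illustrative sketch, not part of the original patch: the load
+ * variants above differ only in their alignment requirements and lane order.
+ * A minimal example, assuming a user translation unit that includes
+ * <emmintrin.h>; _Alignas is C11. */
+#if 0 /* example only, never compiled */
+static void example_pd_loads(void) {
+  _Alignas(16) double aligned[2] = {1.0, 2.0};
+  double plain[3] = {0.0, 3.0, 4.0};
+  __m128d a = _mm_load_pd(aligned);    /* pointer must be 16-byte aligned */
+  __m128d u = _mm_loadu_pd(&plain[1]); /* any alignment accepted          */
+  __m128d r = _mm_loadr_pd(aligned);   /* lanes reversed: {2.0, 1.0}      */
+  (void)a; (void)u; (void)r;
+}
+#endif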
+/// +/// \param __dp +/// A pointer to a memory location containing a double-precision value. +/// The address of the memory location does not have to be aligned. +/// \returns A 128-bit vector of [2 x double] containing the loaded value. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp) { + struct __mm_load_sd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + double __u = ((const struct __mm_load_sd_struct *)__dp)->__u; + return __extension__(__m128d){__u, 0}; +} + +/// Loads a double-precision value into the high-order bits of a 128-bit +/// vector of [2 x double]. The low-order bits are copied from the low-order +/// bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [63:0] are written to bits [63:0] of the result. +/// \param __dp +/// A pointer to a 64-bit memory location containing a double-precision +/// floating-point value that is loaded. The loaded value is written to bits +/// [127:64] of the result. The address of the memory location does not have +/// to be aligned. +/// \returns A 128-bit vector of [2 x double] containing the moved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a, + double const *__dp) { + struct __mm_loadh_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + double __u = ((const struct __mm_loadh_pd_struct *)__dp)->__u; + return __extension__(__m128d){__a[0], __u}; +} + +/// Loads a double-precision value into the low-order bits of a 128-bit +/// vector of [2 x double]. The high-order bits are copied from the +/// high-order bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [127:64] are written to bits [127:64] of the result. +/// \param __dp +/// A pointer to a 64-bit memory location containing a double-precision +/// floating-point value that is loaded. The loaded value is written to bits +/// [63:0] of the result. The address of the memory location does not have to +/// be aligned. +/// \returns A 128-bit vector of [2 x double] containing the moved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a, + double const *__dp) { + struct __mm_loadl_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + double __u = ((const struct __mm_loadl_pd_struct *)__dp)->__u; + return __extension__(__m128d){__u, __a[1]}; +} + +/// Constructs a 128-bit floating-point vector of [2 x double] with +/// unspecified content. This could be used as an argument to another +/// intrinsic function where the argument is required but the value is not +/// actually used. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 128-bit floating-point vector of [2 x double] with unspecified +/// content. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void) { + return (__m128d)__builtin_ia32_undef128(); +} + +/// Constructs a 128-bit floating-point vector of [2 x double]. The lower +/// 64 bits of the vector are initialized with the specified double-precision +/// floating-point value. The upper 64 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. 
+/// +/// \param __w +/// A double-precision floating-point value used to initialize the lower 64 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. The +/// lower 64 bits contain the value of the parameter. The upper 64 bits are +/// set to zero. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w) { + return __extension__(__m128d){__w, 0}; +} + +/// Constructs a 128-bit floating-point vector of [2 x double], with each +/// of the two double-precision floating-point vector elements set to the +/// specified double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP / MOVLHPS instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set1_pd(double __w) { + return __extension__(__m128d){__w, __w}; +} + +/// Constructs a 128-bit floating-point vector of [2 x double], with each +/// of the two double-precision floating-point vector elements set to the +/// specified double-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP / MOVLHPS instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd1(double __w) { + return _mm_set1_pd(__w); +} + +/// Constructs a 128-bit floating-point vector of [2 x double] +/// initialized with the specified double-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize the upper 64 +/// bits of the result. +/// \param __x +/// A double-precision floating-point value used to initialize the lower 64 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd(double __w, + double __x) { + return __extension__(__m128d){__x, __w}; +} + +/// Constructs a 128-bit floating-point vector of [2 x double], +/// initialized in reverse order with the specified double-precision +/// floating-point values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __w +/// A double-precision floating-point value used to initialize the lower 64 +/// bits of the result. +/// \param __x +/// A double-precision floating-point value used to initialize the upper 64 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [2 x double]. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w, + double __x) { + return __extension__(__m128d){__w, __x}; +} + +/// Constructs a 128-bit floating-point vector of [2 x double] +/// initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instruction. +/// +/// \returns An initialized 128-bit floating-point vector of [2 x double] with +/// all elements set to zero. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void) { + return __extension__(__m128d){0.0, 0.0}; +} + +/// Constructs a 128-bit floating-point vector of [2 x double]. 
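+/* Editor's illustrative sketch, not part of the original patch: note the
+ * argument order of the constructors above; _mm_set_pd takes the upper lane
+ * first, while _mm_setr_pd takes lanes in memory order. Assumes a user
+ * translation unit that includes <emmintrin.h>. */
+#if 0 /* example only, never compiled */
+static void example_pd_constructors(void) {
+  __m128d a = _mm_set_pd(2.0, 1.0);  /* lane 0 = 1.0, lane 1 = 2.0 */
+  __m128d b = _mm_setr_pd(1.0, 2.0); /* same lanes as a            */
+  __m128d s = _mm_set_sd(5.0);       /* {5.0, 0.0}                 */
+  __m128d z = _mm_setzero_pd();      /* {0.0, 0.0}                 */
+  (void)a; (void)b; (void)s; (void)z;
+}
+#endif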
The lower +/// 64 bits are set to the lower 64 bits of the second parameter. The upper +/// 64 bits are set to the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDPD / BLENDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits are written to the +/// upper 64 bits of the result. +/// \param __b +/// A 128-bit vector of [2 x double]. The lower 64 bits are written to the +/// lower 64 bits of the result. +/// \returns A 128-bit vector of [2 x double] containing the moved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_move_sd(__m128d __a, + __m128d __b) { + __a[0] = __b[0]; + return __a; +} + +/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSD / MOVSD instruction. +/// +/// \param __dp +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [2 x double] containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp, + __m128d __a) { + struct __mm_store_sd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_store_sd_struct *)__dp)->__u = __a[0]; +} + +/// Moves packed double-precision values from a 128-bit vector of +/// [2 x double] to a memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPD / MOVAPS instruction. +/// +/// \param __dp +/// A pointer to an aligned memory location that can store two +/// double-precision values. +/// \param __a +/// A packed 128-bit vector of [2 x double] containing the values to be +/// moved. +static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp, + __m128d __a) { + *(__m128d *)__dp = __a; +} + +/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to +/// the upper and lower 64 bits of a memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the +/// VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS instruction. +/// +/// \param __dp +/// A pointer to a memory location that can store two double-precision +/// values. +/// \param __a +/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each +/// of the values in \a __dp. +static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp, + __m128d __a) { + __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0); + _mm_store_pd(__dp, __a); +} + +/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to +/// the upper and lower 64 bits of a memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the +/// VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS instruction. +/// +/// \param __dp +/// A pointer to a memory location that can store two double-precision +/// values. +/// \param __a +/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each +/// of the values in \a __dp. +static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp, + __m128d __a) { + _mm_store1_pd(__dp, __a); +} + +/// Stores a 128-bit vector of [2 x double] into an unaligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPD / MOVUPD instruction. +/// +/// \param __dp +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be stored. 
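+/* Editor's illustrative sketch, not part of the original patch: a short
+ * round trip through the store intrinsics above. Assumes a user translation
+ * unit that includes <emmintrin.h>; _Alignas is C11. */
+#if 0 /* example only, never compiled */
+static void example_pd_stores(void) {
+  _Alignas(16) double out[2];
+  double lo;
+  __m128d v = _mm_set_pd(4.0, 3.0); /* {3.0, 4.0}                          */
+  _mm_store_pd(out, v);             /* out = {3.0, 4.0}; needs alignment   */
+  _mm_store1_pd(out, v);            /* out = {3.0, 3.0}; low lane repeated */
+  _mm_store_sd(&lo, v);             /* lo = 3.0                            */
+  (void)lo;
+}
+#endif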
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp, + __m128d __a) { + struct __storeu_pd { + __m128d_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_pd *)__dp)->__v = __a; +} + +/// Stores two double-precision values, in reverse order, from a 128-bit +/// vector of [2 x double] to a 16-byte aligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to a shuffling instruction followed by a +/// VMOVAPD / MOVAPD instruction. +/// +/// \param __dp +/// A pointer to a 16-byte aligned memory location that can store two +/// double-precision values. +/// \param __a +/// A 128-bit vector of [2 x double] containing the values to be reversed and +/// stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp, + __m128d __a) { + __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0); + *(__m128d *)__dp = __a; +} + +/// Stores the upper 64 bits of a 128-bit vector of [2 x double] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction. +/// +/// \param __dp +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [2 x double] containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp, + __m128d __a) { + struct __mm_storeh_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[1]; +} + +/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction. +/// +/// \param __dp +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [2 x double] containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp, + __m128d __a) { + struct __mm_storeh_pd_struct { + double __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[0]; +} + +/// Adds the corresponding elements of two 128-bit vectors of [16 x i8], +/// saving the lower 8 bits of each sum in the corresponding element of a +/// 128-bit result vector of [16 x i8]. +/// +/// The integer elements of both parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDB / PADDB instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. +/// \param __b +/// A 128-bit vector of [16 x i8]. +/// \returns A 128-bit vector of [16 x i8] containing the sums of both +/// parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a, + __m128i __b) { + return (__m128i)((__v16qu)__a + (__v16qu)__b); +} + +/// Adds the corresponding elements of two 128-bit vectors of [8 x i16], +/// saving the lower 16 bits of each sum in the corresponding element of a +/// 128-bit result vector of [8 x i16]. +/// +/// The integer elements of both parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDW / PADDW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// \param __b +/// A 128-bit vector of [8 x i16]. +/// \returns A 128-bit vector of [8 x i16] containing the sums of both +/// parameters. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a, + __m128i __b) { + return (__m128i)((__v8hu)__a + (__v8hu)__b); +} + +/// Adds the corresponding elements of two 128-bit vectors of [4 x i32], +/// saving the lower 32 bits of each sum in the corresponding element of a +/// 128-bit result vector of [4 x i32]. +/// +/// The integer elements of both parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDD / PADDD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. +/// \param __b +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit vector of [4 x i32] containing the sums of both +/// parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a, + __m128i __b) { + return (__m128i)((__v4su)__a + (__v4su)__b); +} + +/// Adds two signed or unsigned 64-bit integer values, returning the +/// lower 64 bits of the sum. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDQ instruction. +/// +/// \param __a +/// A 64-bit integer. +/// \param __b +/// A 64-bit integer. +/// \returns A 64-bit integer containing the sum of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_add_si64(__m64 __a, + __m64 __b) { + return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b); +} + +/// Adds the corresponding elements of two 128-bit vectors of [2 x i64], +/// saving the lower 64 bits of each sum in the corresponding element of a +/// 128-bit result vector of [2 x i64]. +/// +/// The integer elements of both parameters can be either signed or unsigned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDQ / PADDQ instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. +/// \param __b +/// A 128-bit vector of [2 x i64]. +/// \returns A 128-bit vector of [2 x i64] containing the sums of both +/// parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a, + __m128i __b) { + return (__m128i)((__v2du)__a + (__v2du)__b); +} + +/// Adds, with saturation, the corresponding elements of two 128-bit +/// signed [16 x i8] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [16 x i8]. +/// +/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums +/// less than 0x80 are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDSB / PADDSB instruction. +/// +/// \param __a +/// A 128-bit signed [16 x i8] vector. +/// \param __b +/// A 128-bit signed [16 x i8] vector. +/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of +/// both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_add_sat((__v16qs)__a, (__v16qs)__b); +} + +/// Adds, with saturation, the corresponding elements of two 128-bit +/// signed [8 x i16] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [8 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDSW / PADDSW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of +/// both parameters. 
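+/* Editor's illustrative sketch, not part of the original patch: wrapping
+ * versus saturating byte addition. Assumes a user translation unit that
+ * includes <emmintrin.h>; _mm_loadu_si32 is defined earlier in this header. */
+#if 0 /* example only, never compiled */
+static void example_wrap_vs_saturate_add(void) {
+  int av = 0x7F000001, bv = 0x01000001; /* byte 3 of av is 127, of bv is 1 */
+  __m128i a = _mm_loadu_si32(&av);
+  __m128i b = _mm_loadu_si32(&bv);
+  __m128i wrap = _mm_add_epi8(a, b); /* byte 3: 127 + 1 wraps to -128 (0x80) */
+  __m128i sat = _mm_adds_epi8(a, b); /* byte 3: saturates to 127 (0x7F)      */
+  (void)wrap; (void)sat;
+}
+#endif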
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_add_sat((__v8hi)__a, (__v8hi)__b); +} + +/// Adds, with saturation, the corresponding elements of two 128-bit +/// unsigned [16 x i8] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [16 x i8]. +/// +/// Positive sums greater than 0xFF are saturated to 0xFF. Negative sums are +/// saturated to 0x00. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDUSB / PADDUSB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums +/// of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_add_sat((__v16qu)__a, (__v16qu)__b); +} + +/// Adds, with saturation, the corresponding elements of two 128-bit +/// unsigned [8 x i16] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [8 x i16]. +/// +/// Positive sums greater than 0xFFFF are saturated to 0xFFFF. Negative sums +/// are saturated to 0x0000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPADDUSB / PADDUSB instruction. +/// +/// \param __a +/// A 128-bit unsigned [8 x i16] vector. +/// \param __b +/// A 128-bit unsigned [8 x i16] vector. +/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums +/// of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_add_sat((__v8hu)__a, (__v8hu)__b); +} + +/// Computes the rounded averages of corresponding elements of two +/// 128-bit unsigned [16 x i8] vectors, saving each result in the +/// corresponding element of a 128-bit result vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAVGB / PAVGB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded +/// averages of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b); +} + +/// Computes the rounded averages of corresponding elements of two +/// 128-bit unsigned [8 x i16] vectors, saving each result in the +/// corresponding element of a 128-bit result vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAVGW / PAVGW instruction. +/// +/// \param __a +/// A 128-bit unsigned [8 x i16] vector. +/// \param __b +/// A 128-bit unsigned [8 x i16] vector. +/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded +/// averages of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b); +} + +/// Multiplies the corresponding elements of two 128-bit signed [8 x i16] +/// vectors, producing eight intermediate 32-bit signed integer products, and +/// adds the consecutive pairs of 32-bit products to form a 128-bit signed +/// [4 x i32] vector. 
+/// +/// For example, bits [15:0] of both parameters are multiplied producing a +/// 32-bit product, bits [31:16] of both parameters are multiplied producing +/// a 32-bit product, and the sum of those two products becomes bits [31:0] +/// of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMADDWD / PMADDWD instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [4 x i32] vector containing the sums of products +/// of both parameters. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b); +} + +/// Compares corresponding elements of two 128-bit signed [8 x i16] +/// vectors, saving the greater value from each comparison in the +/// corresponding element of a 128-bit result vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXSW / PMAXSW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the greater value of +/// each comparison. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b); +} + +/// Compares corresponding elements of two 128-bit unsigned [16 x i8] +/// vectors, saving the greater value from each comparison in the +/// corresponding element of a 128-bit result vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXUB / PMAXUB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of +/// each comparison. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b); +} + +/// Compares corresponding elements of two 128-bit signed [8 x i16] +/// vectors, saving the smaller value from each comparison in the +/// corresponding element of a 128-bit result vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINSW / PMINSW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of +/// each comparison. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b); +} + +/// Compares corresponding elements of two 128-bit unsigned [16 x i8] +/// vectors, saving the smaller value from each comparison in the +/// corresponding element of a 128-bit result vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINUB / PMINUB instruction. +/// +/// \param __a +/// A 128-bit unsigned [16 x i8] vector. +/// \param __b +/// A 128-bit unsigned [16 x i8] vector. +/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of +/// each comparison. 
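+/* Editor's illustrative sketch, not part of the original patch: the pairwise
+ * multiply-add above is the usual building block for 16-bit dot products.
+ * Assumes a user translation unit that includes <emmintrin.h>;
+ * _mm_loadu_si128 is declared elsewhere in this header, not in this hunk. */
+#if 0 /* example only, never compiled */
+static void example_madd_dot_product(void) {
+  short a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
+  short b[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+  __m128i va = _mm_loadu_si128((const __m128i *)a);
+  __m128i vb = _mm_loadu_si128((const __m128i *)b);
+  __m128i dp = _mm_madd_epi16(va, vb); /* [4 x i32] = {3, 7, 11, 15} */
+  (void)dp;
+}
+#endif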
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b); +} + +/// Multiplies the corresponding elements of two signed [8 x i16] +/// vectors, saving the upper 16 bits of each 32-bit product in the +/// corresponding element of a 128-bit signed [8 x i16] result vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULHW / PMULHW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of +/// each of the eight 32-bit products. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b); +} + +/// Multiplies the corresponding elements of two unsigned [8 x i16] +/// vectors, saving the upper 16 bits of each 32-bit product in the +/// corresponding element of a 128-bit unsigned [8 x i16] result vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULHUW / PMULHUW instruction. +/// +/// \param __a +/// A 128-bit unsigned [8 x i16] vector. +/// \param __b +/// A 128-bit unsigned [8 x i16] vector. +/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits +/// of each of the eight 32-bit products. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b); +} + +/// Multiplies the corresponding elements of two signed [8 x i16] +/// vectors, saving the lower 16 bits of each 32-bit product in the +/// corresponding element of a 128-bit signed [8 x i16] result vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULLW / PMULLW instruction. +/// +/// \param __a +/// A 128-bit signed [8 x i16] vector. +/// \param __b +/// A 128-bit signed [8 x i16] vector. +/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of +/// each of the eight 32-bit products. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a, + __m128i __b) { + return (__m128i)((__v8hu)__a * (__v8hu)__b); +} + +/// Multiplies 32-bit unsigned integer values contained in the lower bits +/// of the two 64-bit integer vectors and returns the 64-bit unsigned +/// product. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULUDQ instruction. +/// +/// \param __a +/// A 64-bit integer containing one of the source operands. +/// \param __b +/// A 64-bit integer containing one of the source operands. +/// \returns A 64-bit integer vector containing the product of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_mul_su32(__m64 __a, + __m64 __b) { + return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b); +} + +/// Multiplies 32-bit unsigned integer values contained in the lower +/// bits of the corresponding elements of two [2 x i64] vectors, and returns +/// the 64-bit products in the corresponding elements of a [2 x i64] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULUDQ / PMULUDQ instruction. +/// +/// \param __a +/// A [2 x i64] vector containing one of the source operands. +/// \param __b +/// A [2 x i64] vector containing one of the source operands. +/// \returns A [2 x i64] vector containing the product of both operands. 
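+/* Editor's illustrative sketch, not part of the original patch: SSE2 has no
+ * single full 16x16->32 multiply, so the high and low halves above are
+ * usually combined. Assumes a user translation unit that includes
+ * <emmintrin.h>; _mm_loadu_si128 is declared elsewhere in this header. */
+#if 0 /* example only, never compiled */
+static void example_full_epi16_product(void) {
+  short a[8] = {1000, -2000, 3000, 4000, 5, 6, 7, 8};
+  short b[8] = {300, 300, 300, 300, 1, 1, 1, 1};
+  __m128i va = _mm_loadu_si128((const __m128i *)a);
+  __m128i vb = _mm_loadu_si128((const __m128i *)b);
+  __m128i lo = _mm_mullo_epi16(va, vb); /* low 16 bits of each product  */
+  __m128i hi = _mm_mulhi_epi16(va, vb); /* high 16 bits of each product */
+  /* per lane, (hi << 16) | (unsigned short)lo is the full signed product */
+  (void)lo; (void)hi;
+}
+#endif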
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a, + __m128i __b) { + return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b); +} + +/// Computes the absolute differences of corresponding 8-bit integer +/// values in two 128-bit vectors. Sums the first 8 absolute differences, and +/// separately sums the second 8 absolute differences. Packs these two +/// unsigned 16-bit integer sums into the upper and lower elements of a +/// [2 x i64] vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSADBW / PSADBW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing one of the source operands. +/// \param __b +/// A 128-bit integer vector containing one of the source operands. +/// \returns A [2 x i64] vector containing the sums of the sets of absolute +/// differences between both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a, + __m128i __b) { + return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b); +} + +/// Subtracts the corresponding 8-bit integer values in the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBB / PSUBB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a, + __m128i __b) { + return (__m128i)((__v16qu)__a - (__v16qu)__b); +} + +/// Subtracts the corresponding 16-bit integer values in the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBW / PSUBW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a, + __m128i __b) { + return (__m128i)((__v8hu)__a - (__v8hu)__b); +} + +/// Subtracts the corresponding 32-bit integer values in the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBD / PSUBD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a, + __m128i __b) { + return (__m128i)((__v4su)__a - (__v4su)__b); +} + +/// Subtracts signed or unsigned 64-bit integer values and writes the +/// difference to the corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBQ instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the minuend. +/// \param __b +/// A 64-bit integer vector containing the subtrahend. +/// \returns A 64-bit integer vector containing the difference of the values in +/// the operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_sub_si64(__m64 __a, + __m64 __b) { + return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b); +} + +/// Subtracts the corresponding elements of two [2 x i64] vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBQ / PSUBQ instruction. 
+/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a, + __m128i __b) { + return (__m128i)((__v2du)__a - (__v2du)__b); +} + +/// Subtracts, with saturation, corresponding 8-bit signed integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences greater than 0x7F are saturated to 0x7F, and differences +/// less than 0x80 are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBSB / PSUBSB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b); +} + +/// Subtracts, with saturation, corresponding 16-bit signed integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences greater than 0x7FFF are saturated to 0x7FFF, and values less +/// than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBSW / PSUBSW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the differences of the values +/// in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b); +} + +/// Subtracts, with saturation, corresponding 8-bit unsigned integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences less than 0x00 are saturated to 0x00. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBUSB / PSUBUSB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the unsigned integer +/// differences of the values in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b); +} + +/// Subtracts, with saturation, corresponding 16-bit unsigned integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences less than 0x0000 are saturated to 0x0000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSUBUSW / PSUBUSW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the minuends. +/// \param __b +/// A 128-bit integer vector containing the subtrahends. +/// \returns A 128-bit integer vector containing the unsigned integer +/// differences of the values in the operands. 
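+/* Editor's illustrative sketch, not part of the original patch: wrapping
+ * versus unsigned saturating byte subtraction. Assumes a user translation
+ * unit that includes <emmintrin.h>; _mm_loadu_si32 is defined earlier in
+ * this header. */
+#if 0 /* example only, never compiled */
+static void example_wrap_vs_saturate_sub(void) {
+  int av = 10, bv = 20;              /* only byte 0 is nonzero */
+  __m128i a = _mm_loadu_si32(&av);
+  __m128i b = _mm_loadu_si32(&bv);
+  __m128i wrap = _mm_sub_epi8(a, b); /* byte 0: 10 - 20 wraps to 0xF6 (246) */
+  __m128i sat = _mm_subs_epu8(a, b); /* byte 0: clamps to 0                 */
+  (void)wrap; (void)sat;
+}
+#endif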
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_elementwise_sub_sat((__v8hu)__a, (__v8hu)__b); +} + +/// Performs a bitwise AND of two 128-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPAND / PAND instruction. +/// +/// \param __a +/// A 128-bit integer vector containing one of the source operands. +/// \param __b +/// A 128-bit integer vector containing one of the source operands. +/// \returns A 128-bit integer vector containing the bitwise AND of the values +/// in both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, + __m128i __b) { + return (__m128i)((__v2du)__a & (__v2du)__b); +} + +/// Performs a bitwise AND of two 128-bit integer vectors, using the +/// one's complement of the values contained in the first source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPANDN / PANDN instruction. +/// +/// \param __a +/// A 128-bit vector containing the left source operand. The one's complement +/// of this value is used in the bitwise AND. +/// \param __b +/// A 128-bit vector containing the right source operand. +/// \returns A 128-bit integer vector containing the bitwise AND of the one's +/// complement of the first operand and the values in the second operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a, + __m128i __b) { + return (__m128i)(~(__v2du)__a & (__v2du)__b); +} +/// Performs a bitwise OR of two 128-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPOR / POR instruction. +/// +/// \param __a +/// A 128-bit integer vector containing one of the source operands. +/// \param __b +/// A 128-bit integer vector containing one of the source operands. +/// \returns A 128-bit integer vector containing the bitwise OR of the values +/// in both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a, + __m128i __b) { + return (__m128i)((__v2du)__a | (__v2du)__b); +} + +/// Performs a bitwise exclusive OR of two 128-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPXOR / PXOR instruction. +/// +/// \param __a +/// A 128-bit integer vector containing one of the source operands. +/// \param __b +/// A 128-bit integer vector containing one of the source operands. +/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the +/// values in both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a, + __m128i __b) { + return (__m128i)((__v2du)__a ^ (__v2du)__b); +} + +/// Left-shifts the 128-bit integer vector operand by the specified +/// number of bytes. Low-order bits are cleared. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_slli_si128(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSLLDQ / PSLLDQ instruction. +/// +/// \param a +/// A 128-bit integer vector containing the source operand. +/// \param imm +/// An immediate value specifying the number of bytes to left-shift operand +/// \a a. +/// \returns A 128-bit integer vector containing the left-shifted value. 
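+/* Editor's illustrative sketch, not part of the original patch: the AND,
+ * ANDNOT and OR operations above combine into the classic branchless select,
+ * taking bits of one operand where the mask is 1 and of the other where it
+ * is 0. Assumes a user translation unit that includes <emmintrin.h>. */
+#if 0 /* example only, never compiled */
+static __m128i example_select128(__m128i mask, __m128i a, __m128i b) {
+  /* (mask & a) | (~mask & b) */
+  return _mm_or_si128(_mm_and_si128(mask, a), _mm_andnot_si128(mask, b));
+}
+#endif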
+#define _mm_slli_si128(a, imm) \ + ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \ + (int)(imm))) + +#define _mm_bslli_si128(a, imm) \ + ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \ + (int)(imm))) + +/// Left-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLW / PSLLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to left-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a, + int __count) { + return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count); +} + +/// Left-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLW / PSLLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to left-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a, + __m128i __count) { + return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count); +} + +/// Left-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLD / PSLLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to left-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a, + int __count) { + return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count); +} + +/// Left-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLD / PSLLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to left-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a, + __m128i __count) { + return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count); +} + +/// Left-shifts each 64-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLQ / PSLLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to left-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. 
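+/* Editor's illustrative sketch, not part of the original patch: note that
+ * _mm_slli_si128 shifts the whole register by BYTES, while _mm_slli_epi32
+ * shifts each 32-bit lane by BITS. Assumes a user translation unit that
+ * includes <emmintrin.h>; _mm_loadu_si32 is defined earlier in this header. */
+#if 0 /* example only, never compiled */
+static void example_byte_vs_bit_shift(void) {
+  int one = 1;
+  __m128i v = _mm_loadu_si32(&one);     /* [4 x i32] = {1, 0, 0, 0}   */
+  __m128i bytes = _mm_slli_si128(v, 4); /* 4 bytes left: {0, 1, 0, 0} */
+  __m128i bits = _mm_slli_epi32(v, 4);  /* 4 bits left: {16, 0, 0, 0} */
+  (void)bytes; (void)bits;
+}
+#endif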
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a, + int __count) { + return __builtin_ia32_psllqi128((__v2di)__a, __count); +} + +/// Left-shifts each 64-bit value in the 128-bit integer vector operand +/// by the specified number of bits. Low-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSLLQ / PSLLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to left-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the left-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a, + __m128i __count) { + return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count); +} + +/// Right-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAW / PSRAW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a, + int __count) { + return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count); +} + +/// Right-shifts each 16-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAW / PSRAW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a, + __m128i __count) { + return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count); +} + +/// Right-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAD / PSRAD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a, + int __count) { + return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count); +} + +/// Right-shifts each 32-bit value in the 128-bit integer vector operand +/// by the specified number of bits. High-order bits are filled with the sign +/// bit of the initial value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRAD / PSRAD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. 
+/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a, + __m128i __count) { + return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count); +} + +/// Right-shifts the 128-bit integer vector operand by the specified +/// number of bytes. High-order bits are cleared. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_srli_si128(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSRLDQ / PSRLDQ instruction. +/// +/// \param a +/// A 128-bit integer vector containing the source operand. +/// \param imm +/// An immediate value specifying the number of bytes to right-shift operand +/// \a a. +/// \returns A 128-bit integer vector containing the right-shifted value. +#define _mm_srli_si128(a, imm) \ + ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \ + (int)(imm))) + +#define _mm_bsrli_si128(a, imm) \ + ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \ + (int)(imm))) + +/// Right-shifts each of 16-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLW / PSRLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a, + int __count) { + return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count); +} + +/// Right-shifts each of 16-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLW / PSRLW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a, + __m128i __count) { + return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count); +} + +/// Right-shifts each of 32-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLD / PSRLD instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a, + int __count) { + return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count); +} + +/// Right-shifts each of 32-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLD / PSRLD instruction. 
+/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a, + __m128i __count) { + return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count); +} + +/// Right-shifts each of 64-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLQ / PSRLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// An integer value specifying the number of bits to right-shift each value +/// in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a, + int __count) { + return __builtin_ia32_psrlqi128((__v2di)__a, __count); +} + +/// Right-shifts each of 64-bit values in the 128-bit integer vector +/// operand by the specified number of bits. High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPSRLQ / PSRLQ instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the source operand. +/// \param __count +/// A 128-bit integer vector in which bits [63:0] specify the number of bits +/// to right-shift each value in operand \a __a. +/// \returns A 128-bit integer vector containing the right-shifted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, + __m128i __count) { + return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count); +} + +/// Compares each of the corresponding 8-bit values of the 128-bit +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQB / PCMPEQB instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a, + __m128i __b) { + return (__m128i)((__v16qi)__a == (__v16qi)__b); +} + +/// Compares each of the corresponding 16-bit values of the 128-bit +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQW / PCMPEQW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a, + __m128i __b) { + return (__m128i)((__v8hi)__a == (__v8hi)__b); +} + +/// Compares each of the corresponding 32-bit values of the 128-bit +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQD / PCMPEQD instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. 
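+/* Editor's illustrative sketch, not part of the original patch: the equality
+ * compares above yield all-ones or all-zero lanes, which are typically turned
+ * into a scalar bitmask. Assumes a user translation unit that includes
+ * <emmintrin.h>; _mm_loadu_si128, _mm_set1_epi8 and _mm_movemask_epi8 are
+ * declared elsewhere in this header, not in this hunk. */
+#if 0 /* example only, never compiled */
+static void example_byte_compare_mask(void) {
+  char buf[16] = {'a', 'b', 'c', 'a'};                  /* rest zero-filled */
+  __m128i hay = _mm_loadu_si128((const __m128i *)buf);
+  __m128i eq = _mm_cmpeq_epi8(hay, _mm_set1_epi8('a')); /* 0xFF where 'a'   */
+  int bits = _mm_movemask_epi8(eq);                     /* 0x9: bytes 0, 3  */
+  (void)bits;
+}
+#endif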
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a, + __m128i __b) { + return (__m128i)((__v4si)__a == (__v4si)__b); +} + +/// Compares each of the corresponding signed 8-bit values of the 128-bit +/// integer vectors to determine if the values in the first operand are +/// greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTB / PCMPGTB instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a, + __m128i __b) { + /* This function always performs a signed comparison, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i)((__v16qs)__a > (__v16qs)__b); +} + +/// Compares each of the corresponding signed 16-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTW / PCMPGTW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a, + __m128i __b) { + return (__m128i)((__v8hi)__a > (__v8hi)__b); +} + +/// Compares each of the corresponding signed 32-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTD / PCMPGTD instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a, + __m128i __b) { + return (__m128i)((__v4si)__a > (__v4si)__b); +} + +/// Compares each of the corresponding signed 8-bit values of the 128-bit +/// integer vectors to determine if the values in the first operand are less +/// than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTB / PCMPGTB instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a, + __m128i __b) { + return _mm_cmpgt_epi8(__b, __a); +} + +/// Compares each of the corresponding signed 16-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are less than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTW / PCMPGTW instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a, + __m128i __b) { + return _mm_cmpgt_epi16(__b, __a); +} + +/// Compares each of the corresponding signed 32-bit values of the +/// 128-bit integer vectors to determine if the values in the first operand +/// are less than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTD / PCMPGTD instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \param __b +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a, + __m128i __b) { + return _mm_cmpgt_epi32(__b, __a); +} + +#ifdef __x86_64__ +/// Converts a 64-bit signed integer value from the second operand into a +/// double-precision value and returns it in the lower element of a [2 x +/// double] vector; the upper element of the returned vector is copied from +/// the upper element of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SD / CVTSI2SD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The upper 64 bits of this operand are +/// copied to the upper 64 bits of the destination. +/// \param __b +/// A 64-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the +/// converted value of the second operand. The upper 64 bits are copied from +/// the upper 64 bits of the first operand. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a, + long long __b) { + __a[0] = __b; + return __a; +} + +/// Converts the first (lower) element of a vector of [2 x double] into a +/// 64-bit signed integer value. +/// +/// If the converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 64-bit signed integer containing the converted value. +static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) { + return __builtin_ia32_cvtsd2si64((__v2df)__a); +} + +/// Converts the first (lower) element of a vector of [2 x double] into a +/// 64-bit signed truncated (rounded toward zero) integer value. +/// +/// If a converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSD2SI / CVTTSD2SI +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the +/// conversion. +/// \returns A 64-bit signed integer containing the converted value. +static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttsd_si64(__m128d __a) { + return __builtin_ia32_cvttsd2si64((__v2df)__a); +} +#endif + +/// Converts a vector of [4 x i32] into a vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTDQ2PS / CVTDQ2PS instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 128-bit vector of [4 x float] containing the converted values. 
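A minimal sketch of the 64-bit scalar double/integer conversions described here (editorial illustration, not part of this diff; it assumes an x86-64 target and the default MXCSR round-to-nearest mode):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
#ifdef __x86_64__
  __m128d v = _mm_set_sd(2.75);            /* lower element = 2.75 */
  long long r = _mm_cvtsd_si64(v);         /* rounds per MXCSR -> 3 */
  long long t = _mm_cvttsd_si64(v);        /* truncates toward zero -> 2 */
  __m128d back = _mm_cvtsi64_sd(_mm_setzero_pd(), t); /* lower element = 2.0 */
  printf("%lld %lld %f\n", r, t, _mm_cvtsd_f64(back));
#endif
  return 0;
}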
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) { + return (__m128) __builtin_convertvector((__v4si)__a, __v4sf); +} + +/// Converts a vector of [4 x float] into a vector of [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPS2DQ / CVTPS2DQ instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit integer vector of [4 x i32] containing the converted +/// values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) { + return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a); +} + +/// Converts a vector of [4 x float] into four signed truncated (rounded toward +/// zero) 32-bit integers, returned in a vector of [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTPS2DQ / CVTTPS2DQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x i32] containing the converted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a) { + return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a); +} + +/// Returns a vector of [4 x i32] where the lowest element is the input +/// operand and the remaining elements are zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __a +/// A 32-bit signed integer operand. +/// \returns A 128-bit vector of [4 x i32]. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a) { + return __extension__(__m128i)(__v4si){__a, 0, 0, 0}; +} + +/// Returns a vector of [2 x i64] where the lower element is the input +/// operand and the upper element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction +/// in 64-bit mode. +/// +/// \param __a +/// A 64-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [2 x i64] containing the converted value. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a) { + return __extension__(__m128i)(__v2di){__a, 0}; +} + +/// Moves the least significant 32 bits of a vector of [4 x i32] to a +/// 32-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __a +/// A vector of [4 x i32]. The least significant 32 bits are moved to the +/// destination. +/// \returns A 32-bit signed integer containing the moved value. +static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a) { + __v4si __b = (__v4si)__a; + return __b[0]; +} + +/// Moves the least significant 64 bits of a vector of [2 x i64] to a +/// 64-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A vector of [2 x i64]. The least significant 64 bits are moved to the +/// destination. +/// \returns A 64-bit signed integer containing the moved value. 
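A short sketch contrasting the rounding and truncating packed float-to-int conversions documented here (editorial illustration only; the inputs and the round-half-to-even results assume the default MXCSR rounding mode):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128 f = _mm_setr_ps(1.5f, -2.5f, 3.7f, -0.2f);
  __m128i rounded   = _mm_cvtps_epi32(f);  /* 2, -2, 4, 0 (ties to even) */
  __m128i truncated = _mm_cvttps_epi32(f); /* 1, -2, 3, 0 (toward zero)  */
  __m128  back      = _mm_cvtepi32_ps(truncated);
  int r[4], t[4];
  _mm_storeu_si128((__m128i_u *)r, rounded);
  _mm_storeu_si128((__m128i_u *)t, truncated);
  printf("%d %d %d %d | %d %d %d %d | %f\n",
         r[0], r[1], r[2], r[3], t[0], t[1], t[2], t[3],
         _mm_cvtss_f32(back)); /* back lane 0 is 1.0 */
  return 0;
}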
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a) { + return __a[0]; +} + +/// Moves packed integer values from an aligned 128-bit memory location +/// to elements in a 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQA / MOVDQA instruction. +/// +/// \param __p +/// An aligned pointer to a memory location containing integer values. +/// \returns A 128-bit integer vector containing the moved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_load_si128(__m128i const *__p) { + return *__p; +} + +/// Moves packed integer values from an unaligned 128-bit memory location +/// to elements in a 128-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDQU / MOVDQU instruction. +/// +/// \param __p +/// A pointer to a memory location containing integer values. +/// \returns A 128-bit integer vector containing the moved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_loadu_si128(__m128i_u const *__p) { + struct __loadu_si128 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_si128 *)__p)->__v; +} + +/// Returns a vector of [2 x i64] where the lower element is taken from +/// the lower element of the operand, and the upper element is zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __p +/// A 128-bit vector of [2 x i64]. Bits [63:0] are written to bits [63:0] of +/// the destination. +/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the +/// moved value. The higher order bits are cleared. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_loadl_epi64(__m128i_u const *__p) { + struct __mm_loadl_epi64_struct { + long long __u; + } __attribute__((__packed__, __may_alias__)); + return __extension__(__m128i){ + ((const struct __mm_loadl_epi64_struct *)__p)->__u, 0}; +} + +/// Generates a 128-bit vector of [4 x i32] with unspecified content. +/// This could be used as an argument to another intrinsic function where the +/// argument is required but the value is not actually used. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 128-bit vector of [4 x i32] with unspecified content. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void) { + return (__m128i)__builtin_ia32_undef128(); +} + +/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with +/// the specified 64-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q1 +/// A 64-bit integer value used to initialize the upper 64 bits of the +/// destination vector of [2 x i64]. +/// \param __q0 +/// A 64-bit integer value used to initialize the lower 64 bits of the +/// destination vector of [2 x i64]. +/// \returns An initialized 128-bit vector of [2 x i64] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64x(long long __q1, + long long __q0) { + return __extension__(__m128i)(__v2di){__q0, __q1}; +} + +/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with +/// the specified 64-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. 
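Usage sketch for the aligned and unaligned 128-bit loads defined in this part of the header (editorial illustration; buffer names and values are assumptions, and a C11 compiler is assumed for _Alignas):

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* _mm_load_si128 requires a 16-byte-aligned pointer; _mm_loadu_si128
     accepts any address. */
  _Alignas(16) int32_t aligned[4] = {1, 2, 3, 4};
  int32_t plain[5] = {0, 10, 20, 30, 40};

  __m128i a = _mm_load_si128((const __m128i *)aligned);
  __m128i u = _mm_loadu_si128((const __m128i_u *)(plain + 1)); /* may be misaligned */
  __m128i sum = _mm_add_epi32(a, u);

  int32_t out[4];
  _mm_storeu_si128((__m128i_u *)out, sum);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 11 22 33 44 */
  return 0;
}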
+/// +/// \param __q1 +/// A 64-bit integer value used to initialize the upper 64 bits of the +/// destination vector of [2 x i64]. +/// \param __q0 +/// A 64-bit integer value used to initialize the lower 64 bits of the +/// destination vector of [2 x i64]. +/// \returns An initialized 128-bit vector of [2 x i64] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64(__m64 __q1, + __m64 __q0) { + return _mm_set_epi64x((long long)__q1, (long long)__q0); +} + +/// Initializes the 32-bit values in a 128-bit vector of [4 x i32] with +/// the specified 32-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i3 +/// A 32-bit integer value used to initialize bits [127:96] of the +/// destination vector. +/// \param __i2 +/// A 32-bit integer value used to initialize bits [95:64] of the destination +/// vector. +/// \param __i1 +/// A 32-bit integer value used to initialize bits [63:32] of the destination +/// vector. +/// \param __i0 +/// A 32-bit integer value used to initialize bits [31:0] of the destination +/// vector. +/// \returns An initialized 128-bit vector of [4 x i32] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi32(int __i3, int __i2, + int __i1, int __i0) { + return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3}; +} + +/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with +/// the specified 16-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w7 +/// A 16-bit integer value used to initialize bits [127:112] of the +/// destination vector. +/// \param __w6 +/// A 16-bit integer value used to initialize bits [111:96] of the +/// destination vector. +/// \param __w5 +/// A 16-bit integer value used to initialize bits [95:80] of the destination +/// vector. +/// \param __w4 +/// A 16-bit integer value used to initialize bits [79:64] of the destination +/// vector. +/// \param __w3 +/// A 16-bit integer value used to initialize bits [63:48] of the destination +/// vector. +/// \param __w2 +/// A 16-bit integer value used to initialize bits [47:32] of the destination +/// vector. +/// \param __w1 +/// A 16-bit integer value used to initialize bits [31:16] of the destination +/// vector. +/// \param __w0 +/// A 16-bit integer value used to initialize bits [15:0] of the destination +/// vector. +/// \returns An initialized 128-bit vector of [8 x i16] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, + short __w2, short __w1, short __w0) { + return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3, + __w4, __w5, __w6, __w7}; +} + +/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with +/// the specified 8-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b15 +/// Initializes bits [127:120] of the destination vector. +/// \param __b14 +/// Initializes bits [119:112] of the destination vector. +/// \param __b13 +/// Initializes bits [111:104] of the destination vector. +/// \param __b12 +/// Initializes bits [103:96] of the destination vector. 
+/// \param __b11 +/// Initializes bits [95:88] of the destination vector. +/// \param __b10 +/// Initializes bits [87:80] of the destination vector. +/// \param __b9 +/// Initializes bits [79:72] of the destination vector. +/// \param __b8 +/// Initializes bits [71:64] of the destination vector. +/// \param __b7 +/// Initializes bits [63:56] of the destination vector. +/// \param __b6 +/// Initializes bits [55:48] of the destination vector. +/// \param __b5 +/// Initializes bits [47:40] of the destination vector. +/// \param __b4 +/// Initializes bits [39:32] of the destination vector. +/// \param __b3 +/// Initializes bits [31:24] of the destination vector. +/// \param __b2 +/// Initializes bits [23:16] of the destination vector. +/// \param __b1 +/// Initializes bits [15:8] of the destination vector. +/// \param __b0 +/// Initializes bits [7:0] of the destination vector. +/// \returns An initialized 128-bit vector of [16 x i8] containing the values +/// provided in the operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, + char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, + char __b4, char __b3, char __b2, char __b1, char __b0) { + return __extension__(__m128i)(__v16qi){ + __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, + __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15}; +} + +/// Initializes both values in a 128-bit integer vector with the +/// specified 64-bit integer value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q +/// Integer value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit integer vector of [2 x i64] with both +/// elements containing the value provided in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q) { + return _mm_set_epi64x(__q, __q); +} + +/// Initializes both values in a 128-bit vector of [2 x i64] with the +/// specified 64-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __q +/// A 64-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [2 x i64] with all elements +/// containing the value provided in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64(__m64 __q) { + return _mm_set_epi64(__q, __q); +} + +/// Initializes all values in a 128-bit vector of [4 x i32] with the +/// specified 32-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i +/// A 32-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [4 x i32] with all elements +/// containing the value provided in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i) { + return _mm_set_epi32(__i, __i, __i, __i); +} + +/// Initializes all values in a 128-bit vector of [8 x i16] with the +/// specified 16-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w +/// A 16-bit value used to initialize the elements of the destination integer +/// vector. 
+/// \returns An initialized 128-bit vector of [8 x i16] with all elements +/// containing the value provided in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w) { + return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w); +} + +/// Initializes all values in a 128-bit vector of [16 x i8] with the +/// specified 8-bit value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b +/// An 8-bit value used to initialize the elements of the destination integer +/// vector. +/// \returns An initialized 128-bit vector of [16 x i8] with all elements +/// containing the value provided in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b) { + return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, + __b, __b, __b, __b, __b); +} + +/// Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 64-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic does not correspond to a specific instruction. +/// +/// \param __q0 +/// A 64-bit integral value used to initialize the lower 64 bits of the +/// result. +/// \param __q1 +/// A 64-bit integral value used to initialize the upper 64 bits of the +/// result. +/// \returns An initialized 128-bit integer vector. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi64(__m64 __q0, + __m64 __q1) { + return _mm_set_epi64(__q1, __q0); +} + +/// Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 32-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integral value used to initialize bits [31:0] of the result. +/// \param __i1 +/// A 32-bit integral value used to initialize bits [63:32] of the result. +/// \param __i2 +/// A 32-bit integral value used to initialize bits [95:64] of the result. +/// \param __i3 +/// A 32-bit integral value used to initialize bits [127:96] of the result. +/// \returns An initialized 128-bit integer vector. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi32(int __i0, int __i1, + int __i2, + int __i3) { + return _mm_set_epi32(__i3, __i2, __i1, __i0); +} + +/// Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 16-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w0 +/// A 16-bit integral value used to initialize bits [15:0] of the result. +/// \param __w1 +/// A 16-bit integral value used to initialize bits [31:16] of the result. +/// \param __w2 +/// A 16-bit integral value used to initialize bits [47:32] of the result. +/// \param __w3 +/// A 16-bit integral value used to initialize bits [63:48] of the result. +/// \param __w4 +/// A 16-bit integral value used to initialize bits [79:64] of the result. +/// \param __w5 +/// A 16-bit integral value used to initialize bits [95:80] of the result. +/// \param __w6 +/// A 16-bit integral value used to initialize bits [111:96] of the result. +/// \param __w7 +/// A 16-bit integral value used to initialize bits [127:112] of the result. +/// \returns An initialized 128-bit integer vector. 
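A small sketch of the element ordering of the _mm_set_* versus _mm_setr_* initializers covered here (editorial illustration, not part of the header; the values are arbitrary):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  /* _mm_set_epi32 lists elements from the highest lane down to the lowest,
     while _mm_setr_epi32 lists them from lowest to highest, so these two
     vectors are bitwise identical. */
  __m128i a = _mm_set_epi32(3, 2, 1, 0);
  __m128i b = _mm_setr_epi32(0, 1, 2, 3);
  __m128i eq = _mm_cmpeq_epi32(a, b);
  printf("identical: %s\n",
         _mm_movemask_epi8(eq) == 0xFFFF ? "yes" : "no");
  printf("lowest lane of a: %d\n", _mm_cvtsi128_si32(a)); /* 0 */
  return 0;
}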
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, + short __w5, short __w6, short __w7) { + return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0); +} + +/// Constructs a 128-bit integer vector, initialized in reverse order +/// with the specified 8-bit integral values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b0 +/// An 8-bit integral value used to initialize bits [7:0] of the result. +/// \param __b1 +/// An 8-bit integral value used to initialize bits [15:8] of the result. +/// \param __b2 +/// An 8-bit integral value used to initialize bits [23:16] of the result. +/// \param __b3 +/// An 8-bit integral value used to initialize bits [31:24] of the result. +/// \param __b4 +/// An 8-bit integral value used to initialize bits [39:32] of the result. +/// \param __b5 +/// An 8-bit integral value used to initialize bits [47:40] of the result. +/// \param __b6 +/// An 8-bit integral value used to initialize bits [55:48] of the result. +/// \param __b7 +/// An 8-bit integral value used to initialize bits [63:56] of the result. +/// \param __b8 +/// An 8-bit integral value used to initialize bits [71:64] of the result. +/// \param __b9 +/// An 8-bit integral value used to initialize bits [79:72] of the result. +/// \param __b10 +/// An 8-bit integral value used to initialize bits [87:80] of the result. +/// \param __b11 +/// An 8-bit integral value used to initialize bits [95:88] of the result. +/// \param __b12 +/// An 8-bit integral value used to initialize bits [103:96] of the result. +/// \param __b13 +/// An 8-bit integral value used to initialize bits [111:104] of the result. +/// \param __b14 +/// An 8-bit integral value used to initialize bits [119:112] of the result. +/// \param __b15 +/// An 8-bit integral value used to initialize bits [127:120] of the result. +/// \returns An initialized 128-bit integer vector. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, + char __b6, char __b7, char __b8, char __b9, char __b10, + char __b11, char __b12, char __b13, char __b14, char __b15) { + return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, + __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); +} + +/// Creates a 128-bit integer vector initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instruction. +/// +/// \returns An initialized 128-bit integer vector with all elements set to +/// zero. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void) { + return __extension__(__m128i)(__v2di){0LL, 0LL}; +} + +/// Stores a 128-bit integer vector to a memory location aligned on a +/// 128-bit boundary. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction. +/// +/// \param __p +/// A pointer to an aligned memory location that will receive the integer +/// values. +/// \param __b +/// A 128-bit integer vector containing the values to be moved. +static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p, + __m128i __b) { + *__p = __b; +} + +/// Stores a 128-bit integer vector to an unaligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the integer values. 
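Sketch of _mm_setzero_si128 plus the aligned 128-bit store documented here, used to clear a buffer 16 bytes at a time (editorial illustration; buffer size and fill byte are assumptions, and C11 _Alignas is assumed):

#include <emmintrin.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void) {
  _Alignas(16) uint8_t buf[64];
  memset(buf, 0xAA, sizeof buf);
  __m128i zero = _mm_setzero_si128();
  for (size_t i = 0; i < sizeof buf; i += 16)
    _mm_store_si128((__m128i *)(buf + i), zero); /* pointer stays 16-byte aligned */
  printf("buf[0]=%d buf[63]=%d\n", buf[0], buf[63]); /* 0 0 */
  return 0;
}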
+/// \param __b +/// A 128-bit integer vector containing the values to be moved. +static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p, + __m128i __b) { + struct __storeu_si128 { + __m128i_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si128 *)__p)->__v = __b; +} + +/// Stores a 64-bit integer value from the low element of a 128-bit integer +/// vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __p +/// A pointer to a 64-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __b +/// A 128-bit integer vector containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p, + __m128i __b) { + struct __storeu_si64 { + long long __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si64 *)__p)->__v = ((__v2di)__b)[0]; +} + +/// Stores a 32-bit integer value from the low element of a 128-bit integer +/// vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVD / MOVD instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __b +/// A 128-bit integer vector containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p, + __m128i __b) { + struct __storeu_si32 { + int __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si32 *)__p)->__v = ((__v4si)__b)[0]; +} + +/// Stores a 16-bit integer value from the low element of a 128-bit integer +/// vector. +/// +/// \headerfile +/// +/// This intrinsic does not correspond to a specific instruction. +/// +/// \param __p +/// A pointer to a 16-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __b +/// A 128-bit integer vector containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p, + __m128i __b) { + struct __storeu_si16 { + short __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_si16 *)__p)->__v = ((__v8hi)__b)[0]; +} + +/// Moves bytes selected by the mask from the first operand to the +/// specified unaligned memory location. When a mask bit is 1, the +/// corresponding byte is written, otherwise it is not written. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). Exception and trap behavior for elements not selected +/// for storage to memory are implementation dependent. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMASKMOVDQU / MASKMOVDQU +/// instruction. +/// +/// \param __d +/// A 128-bit integer vector containing the values to be moved. +/// \param __n +/// A 128-bit integer vector containing the mask. The most significant bit of +/// each byte represents the mask bits. +/// \param __p +/// A pointer to an unaligned 128-bit memory location where the specified +/// values are moved. +static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d, + __m128i __n, + char *__p) { + __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p); +} + +/// Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to +/// a memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPS / MOVLPS instruction. 
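Usage sketch for the narrow unaligned stores (_mm_storeu_si64 / _mm_storeu_si32) defined in this stretch (editorial illustration; the buffer layout and printed value are assumptions):

#include <emmintrin.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void) {
  __m128i v = _mm_set_epi32(4, 3, 2, 1);  /* lowest 32-bit lane = 1 */
  uint8_t buf[12] = {0};
  /* Store only the low 64 and low 32 bits of the vector; neither
     destination needs to be aligned. */
  _mm_storeu_si64(buf, v);      /* bytes 0..7  <- lanes holding 1 and 2 */
  _mm_storeu_si32(buf + 8, v);  /* bytes 8..11 <- lane holding 1        */
  uint32_t w;
  memcpy(&w, buf + 8, sizeof w);
  printf("%u\n", w);            /* 1 */
  return 0;
}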
+/// +/// \param __p +/// A pointer to a 64-bit memory location that will receive the lower 64 bits +/// of the integer vector parameter. +/// \param __a +/// A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the +/// value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p, + __m128i __a) { + struct __mm_storel_epi64_struct { + long long __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_storel_epi64_struct *)__p)->__u = __a[0]; +} + +/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit +/// aligned memory location. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction. +/// +/// \param __p +/// A pointer to the 128-bit aligned memory location used to store the value. +/// \param __a +/// A vector of [2 x double] containing the 64-bit values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(void *__p, + __m128d __a) { + __builtin_nontemporal_store((__v2df)__a, (__v2df *)__p); +} + +/// Stores a 128-bit integer vector to a 128-bit aligned memory location. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction. +/// +/// \param __p +/// A pointer to the 128-bit aligned memory location used to store the value. +/// \param __a +/// A 128-bit integer vector containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(void *__p, + __m128i __a) { + __builtin_nontemporal_store((__v2di)__a, (__v2di *)__p); +} + +/// Stores a 32-bit integer value in the specified memory location. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTI instruction. +/// +/// \param __p +/// A pointer to the 32-bit memory location used to store the value. +/// \param __a +/// A 32-bit integer containing the value to be stored. +static __inline__ void + __attribute__((__always_inline__, __nodebug__, __target__("sse2"))) + _mm_stream_si32(void *__p, int __a) { + __builtin_ia32_movnti((int *)__p, __a); +} + +#ifdef __x86_64__ +/// Stores a 64-bit integer value in the specified memory location. +/// +/// To minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTIQ instruction. +/// +/// \param __p +/// A pointer to the 64-bit memory location used to store the value. +/// \param __a +/// A 64-bit integer containing the value to be stored. +static __inline__ void + __attribute__((__always_inline__, __nodebug__, __target__("sse2"))) + _mm_stream_si64(void *__p, long long __a) { + __builtin_ia32_movnti64((long long *)__p, __a); +} +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/// The cache line containing \a __p is flushed and invalidated from all +/// caches in the coherency domain. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CLFLUSH instruction. +/// +/// \param __p +/// A pointer to the memory location used to identify the cache line to be +/// flushed. 
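A sketch of the non-temporal (streaming) stores described here, for filling a large buffer without displacing useful cache lines (editorial illustration; the helper name, buffer size, and use of C11 aligned_alloc are assumptions, and _mm_sfence comes from xmmintrin.h, which this header pulls in):

#include <emmintrin.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Assumes dst is 16-byte aligned and count is a multiple of 4. */
static void fill_nt(int32_t *dst, int32_t value, size_t count) {
  __m128i v = _mm_set1_epi32(value);
  for (size_t i = 0; i < count; i += 4)
    _mm_stream_si128((__m128i *)(dst + i), v); /* non-temporal store */
  _mm_sfence(); /* make the streaming stores globally visible before returning */
}

int main(void) {
  enum { N = 1 << 16 };
  int32_t *buf = aligned_alloc(16, N * sizeof *buf);
  if (!buf) return 1;
  fill_nt(buf, 7, N);
  printf("%d %d\n", buf[0], buf[N - 1]); /* 7 7 */
  free(buf);
  return 0;
}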
+void _mm_clflush(void const *__p); + +/// Forces strong memory ordering (serialization) between load +/// instructions preceding this instruction and load instructions following +/// this instruction, ensuring the system completes all previous loads before +/// executing subsequent loads. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LFENCE instruction. +/// +void _mm_lfence(void); + +/// Forces strong memory ordering (serialization) between load and store +/// instructions preceding this instruction and load and store instructions +/// following this instruction, ensuring that the system completes all +/// previous memory accesses before executing subsequent memory accesses. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MFENCE instruction. +/// +void _mm_mfence(void); + +#if defined(__cplusplus) +} // extern "C" +#endif + +/// Converts, with saturation, 16-bit signed integers from both 128-bit integer +/// vector operands into 8-bit signed integers, and packs the results into +/// the destination. +/// +/// Positive values greater than 0x7F are saturated to 0x7F. Negative values +/// less than 0x80 are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKSSWB / PACKSSWB instruction. +/// +/// \param __a +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are +/// written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are +/// written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [16 x i8] containing the converted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b); +} + +/// Converts, with saturation, 32-bit signed integers from both 128-bit integer +/// vector operands into 16-bit signed integers, and packs the results into +/// the destination. +/// +/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative +/// values less than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKSSDW / PACKSSDW instruction. +/// +/// \param __a +/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values +/// are written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values +/// are written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [8 x i16] containing the converted values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b); +} + +/// Converts, with saturation, 16-bit signed integers from both 128-bit integer +/// vector operands into 8-bit unsigned integers, and packs the results into +/// the destination. +/// +/// Values greater than 0xFF are saturated to 0xFF. Values less than 0x00 +/// are saturated to 0x00. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPACKUSWB / PACKUSWB instruction. +/// +/// \param __a +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are +/// written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are +/// written to the higher 64 bits of the result. +/// \returns A 128-bit vector of [16 x i8] containing the converted values. 
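Sketch of the saturating pack intrinsics documented here, narrowing signed 16-bit lanes to signed 8-bit lanes (editorial illustration; input values are chosen to show clamping at the int8_t limits):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  /* 300 exceeds the int8_t range and clamps to 127; -300 clamps to -128. */
  __m128i w = _mm_setr_epi16(300, -300, 42, -1, 0, 127, -128, 5);
  __m128i packed = _mm_packs_epi16(w, w); /* low 8 bytes == high 8 bytes */
  signed char out[16];
  _mm_storeu_si128((__m128i_u *)out, packed);
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 127 -128 42 -1 */
  return 0;
}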
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b); +} + +/// Extracts 16 bits from a 128-bit integer vector of [8 x i16], using +/// the immediate-value parameter as a selector. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_extract_epi16(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPEXTRW / PEXTRW instruction. +/// +/// \param a +/// A 128-bit integer vector. +/// \param imm +/// An immediate value. Bits [2:0] selects values from \a a to be assigned +/// to bits[15:0] of the result. \n +/// 000: assign values from bits [15:0] of \a a. \n +/// 001: assign values from bits [31:16] of \a a. \n +/// 010: assign values from bits [47:32] of \a a. \n +/// 011: assign values from bits [63:48] of \a a. \n +/// 100: assign values from bits [79:64] of \a a. \n +/// 101: assign values from bits [95:80] of \a a. \n +/// 110: assign values from bits [111:96] of \a a. \n +/// 111: assign values from bits [127:112] of \a a. +/// \returns An integer, whose lower 16 bits are selected from the 128-bit +/// integer vector parameter and the remaining bits are assigned zeros. +#define _mm_extract_epi16(a, imm) \ + ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \ + (int)(imm))) + +/// Constructs a 128-bit integer vector by first making a copy of the +/// 128-bit integer vector parameter, and then inserting the lower 16 bits +/// of an integer parameter into an offset specified by the immediate-value +/// parameter. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_insert_epi16(__m128i a, int b, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPINSRW / PINSRW instruction. +/// +/// \param a +/// A 128-bit integer vector of [8 x i16]. This vector is copied to the +/// result and then one of the eight elements in the result is replaced by +/// the lower 16 bits of \a b. +/// \param b +/// An integer. The lower 16 bits of this parameter are written to the +/// result beginning at an offset specified by \a imm. +/// \param imm +/// An immediate value specifying the bit offset in the result at which the +/// lower 16 bits of \a b are written. +/// \returns A 128-bit integer vector containing the constructed values. +#define _mm_insert_epi16(a, b, imm) \ + ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \ + (int)(imm))) + +/// Copies the values of the most significant bits from each 8-bit +/// element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask +/// value, zero-extends the value, and writes it to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVMSKB / PMOVMSKB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values with bits to be extracted. +/// \returns The most significant bits from each 8-bit element in \a __a, +/// written to bits [15:0]. The other bits are assigned zeros. +static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) { + return __builtin_ia32_pmovmskb128((__v16qi)__a); +} + +/// Constructs a 128-bit integer vector by shuffling four 32-bit +/// elements of a 128-bit integer vector parameter, using the immediate-value +/// parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_shuffle_epi32(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSHUFD / PSHUFD instruction. 
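Usage sketch combining _mm_cmpeq_epi8 with the _mm_movemask_epi8 intrinsic documented here, in a memchr-style search over one 16-byte block (editorial illustration; the helper name is hypothetical and __builtin_ctz is a GCC/Clang builtin, assumed available with this header):

#include <emmintrin.h>
#include <string.h>
#include <stdio.h>

/* Return the index of the first occurrence of byte c in a 16-byte block, or -1. */
static int find_byte16(const unsigned char *p, unsigned char c) {
  __m128i block = _mm_loadu_si128((const __m128i_u *)p);
  __m128i hits  = _mm_cmpeq_epi8(block, _mm_set1_epi8((char)c));
  int mask = _mm_movemask_epi8(hits);     /* one bit per byte lane */
  return mask ? __builtin_ctz(mask) : -1; /* index of lowest set bit */
}

int main(void) {
  unsigned char buf[16];
  memcpy(buf, "hello, emmintrin", 16);
  printf("%d\n", find_byte16(buf, 'e'));  /* 1 */
  return 0;
}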
+/// +/// \param a +/// A 128-bit integer vector containing the values to be copied. +/// \param imm +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from a. The destinations within the 128-bit destination are assigned +/// values as follows: \n +/// Bits [1:0] are used to assign values to bits [31:0] of the result. \n +/// Bits [3:2] are used to assign values to bits [63:32] of the result. \n +/// Bits [5:4] are used to assign values to bits [95:64] of the result. \n +/// Bits [7:6] are used to assign values to bits [127:96] of the result. \n +/// Bit value assignments: \n +/// 00: assign values from bits [31:0] of \a a. \n +/// 01: assign values from bits [63:32] of \a a. \n +/// 10: assign values from bits [95:64] of \a a. \n +/// 11: assign values from bits [127:96] of \a a. \n +/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro. +/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form +/// [b6, b4, b2, b0]. +/// \returns A 128-bit integer vector containing the shuffled values. +#define _mm_shuffle_epi32(a, imm) \ + ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm))) + +/// Constructs a 128-bit integer vector by shuffling four lower 16-bit +/// elements of a 128-bit integer vector of [8 x i16], using the immediate +/// value parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_shufflelo_epi16(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSHUFLW / PSHUFLW instruction. +/// +/// \param a +/// A 128-bit integer vector of [8 x i16]. Bits [127:64] are copied to bits +/// [127:64] of the result. +/// \param imm +/// An 8-bit immediate value specifying which elements to copy from \a a. \n +/// Bits[1:0] are used to assign values to bits [15:0] of the result. \n +/// Bits[3:2] are used to assign values to bits [31:16] of the result. \n +/// Bits[5:4] are used to assign values to bits [47:32] of the result. \n +/// Bits[7:6] are used to assign values to bits [63:48] of the result. \n +/// Bit value assignments: \n +/// 00: assign values from bits [15:0] of \a a. \n +/// 01: assign values from bits [31:16] of \a a. \n +/// 10: assign values from bits [47:32] of \a a. \n +/// 11: assign values from bits [63:48] of \a a. \n +/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro. +/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form +/// [b6, b4, b2, b0]. +/// \returns A 128-bit integer vector containing the shuffled values. +#define _mm_shufflelo_epi16(a, imm) \ + ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm))) + +/// Constructs a 128-bit integer vector by shuffling four upper 16-bit +/// elements of a 128-bit integer vector of [8 x i16], using the immediate +/// value parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_shufflehi_epi16(__m128i a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VPSHUFHW / PSHUFHW instruction. +/// +/// \param a +/// A 128-bit integer vector of [8 x i16]. Bits [63:0] are copied to bits +/// [63:0] of the result. +/// \param imm +/// An 8-bit immediate value specifying which elements to copy from \a a. \n +/// Bits[1:0] are used to assign values to bits [79:64] of the result. \n +/// Bits[3:2] are used to assign values to bits [95:80] of the result. \n +/// Bits[5:4] are used to assign values to bits [111:96] of the result. \n +/// Bits[7:6] are used to assign values to bits [127:112] of the result. 
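Sketch of _mm_shuffle_epi32 driven by the _MM_SHUFFLE macro mentioned in the notes above (editorial illustration, not part of the header; _MM_SHUFFLE itself is defined in xmmintrin.h, which this header includes):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i v = _mm_setr_epi32(10, 20, 30, 40);
  /* _MM_SHUFFLE(z, y, x, w) builds the 8-bit selector; the result lanes are
     filled from low to high, so this reverses the element order. */
  __m128i rev = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3));
  /* Broadcast lane 0 into every lane: */
  __m128i bc  = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0));
  int r[4], b[4];
  _mm_storeu_si128((__m128i_u *)r, rev);
  _mm_storeu_si128((__m128i_u *)b, bc);
  printf("%d %d %d %d | %d %d %d %d\n",
         r[0], r[1], r[2], r[3], b[0], b[1], b[2], b[3]);
  /* expected: 40 30 20 10 | 10 10 10 10 */
  return 0;
}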
\n +/// Bit value assignments: \n +/// 00: assign values from bits [79:64] of \a a. \n +/// 01: assign values from bits [95:80] of \a a. \n +/// 10: assign values from bits [111:96] of \a a. \n +/// 11: assign values from bits [127:112] of \a a. \n +/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro. +/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form +/// [b6, b4, b2, b0]. +/// \returns A 128-bit integer vector containing the shuffled values. +#define _mm_shufflehi_epi16(a, imm) \ + ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm))) + +/// Unpacks the high-order (index 8-15) values from two 128-bit vectors +/// of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHBW / PUNPCKHBW +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. +/// Bits [71:64] are written to bits [7:0] of the result. \n +/// Bits [79:72] are written to bits [23:16] of the result. \n +/// Bits [87:80] are written to bits [39:32] of the result. \n +/// Bits [95:88] are written to bits [55:48] of the result. \n +/// Bits [103:96] are written to bits [71:64] of the result. \n +/// Bits [111:104] are written to bits [87:80] of the result. \n +/// Bits [119:112] are written to bits [103:96] of the result. \n +/// Bits [127:120] are written to bits [119:112] of the result. +/// \param __b +/// A 128-bit vector of [16 x i8]. \n +/// Bits [71:64] are written to bits [15:8] of the result. \n +/// Bits [79:72] are written to bits [31:24] of the result. \n +/// Bits [87:80] are written to bits [47:40] of the result. \n +/// Bits [95:88] are written to bits [63:56] of the result. \n +/// Bits [103:96] are written to bits [79:72] of the result. \n +/// Bits [111:104] are written to bits [95:88] of the result. \n +/// Bits [119:112] are written to bits [111:104] of the result. \n +/// Bits [127:120] are written to bits [127:120] of the result. +/// \returns A 128-bit vector of [16 x i8] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_shufflevector( + (__v16qi)__a, (__v16qi)__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11, + 16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15); +} + +/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of +/// [8 x i16] and interleaves them into a 128-bit vector of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHWD / PUNPCKHWD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// Bits [79:64] are written to bits [15:0] of the result. \n +/// Bits [95:80] are written to bits [47:32] of the result. \n +/// Bits [111:96] are written to bits [79:64] of the result. \n +/// Bits [127:112] are written to bits [111:96] of the result. +/// \param __b +/// A 128-bit vector of [8 x i16]. +/// Bits [79:64] are written to bits [31:16] of the result. \n +/// Bits [95:80] are written to bits [63:48] of the result. \n +/// Bits [111:96] are written to bits [95:80] of the result. \n +/// Bits [127:112] are written to bits [127:112] of the result. +/// \returns A 128-bit vector of [8 x i16] containing the interleaved values. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8 + 4, 5, + 8 + 5, 6, 8 + 6, 7, 8 + 7); +} + +/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of +/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHDQ / PUNPCKHDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. \n +/// Bits [95:64] are written to bits [31:0] of the destination. \n +/// Bits [127:96] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x i32]. \n +/// Bits [95:64] are written to bits [64:32] of the destination. \n +/// Bits [127:96] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x i32] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4 + 2, 3, + 4 + 3); +} + +/// Unpacks the high-order 64-bit elements from two 128-bit vectors of +/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKHQDQ / PUNPCKHQDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. \n +/// Bits [127:64] are written to bits [63:0] of the destination. +/// \param __b +/// A 128-bit vector of [2 x i64]. \n +/// Bits [127:64] are written to bits [127:64] of the destination. +/// \returns A 128-bit vector of [2 x i64] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2 + 1); +} + +/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of +/// [16 x i8] and interleaves them into a 128-bit vector of [16 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLBW / PUNPCKLBW +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. \n +/// Bits [7:0] are written to bits [7:0] of the result. \n +/// Bits [15:8] are written to bits [23:16] of the result. \n +/// Bits [23:16] are written to bits [39:32] of the result. \n +/// Bits [31:24] are written to bits [55:48] of the result. \n +/// Bits [39:32] are written to bits [71:64] of the result. \n +/// Bits [47:40] are written to bits [87:80] of the result. \n +/// Bits [55:48] are written to bits [103:96] of the result. \n +/// Bits [63:56] are written to bits [119:112] of the result. +/// \param __b +/// A 128-bit vector of [16 x i8]. +/// Bits [7:0] are written to bits [15:8] of the result. \n +/// Bits [15:8] are written to bits [31:24] of the result. \n +/// Bits [23:16] are written to bits [47:40] of the result. \n +/// Bits [31:24] are written to bits [63:56] of the result. \n +/// Bits [39:32] are written to bits [79:72] of the result. \n +/// Bits [47:40] are written to bits [95:88] of the result. \n +/// Bits [55:48] are written to bits [111:104] of the result. \n +/// Bits [63:56] are written to bits [127:120] of the result. +/// \returns A 128-bit vector of [16 x i8] containing the interleaved values. 
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_shufflevector( + (__v16qi)__a, (__v16qi)__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4, + 16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7); +} + +/// Unpacks the low-order (index 0-3) values from each of the two 128-bit +/// vectors of [8 x i16] and interleaves them into a 128-bit vector of +/// [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLWD / PUNPCKLWD +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// Bits [15:0] are written to bits [15:0] of the result. \n +/// Bits [31:16] are written to bits [47:32] of the result. \n +/// Bits [47:32] are written to bits [79:64] of the result. \n +/// Bits [63:48] are written to bits [111:96] of the result. +/// \param __b +/// A 128-bit vector of [8 x i16]. +/// Bits [15:0] are written to bits [31:16] of the result. \n +/// Bits [31:16] are written to bits [63:48] of the result. \n +/// Bits [47:32] are written to bits [95:80] of the result. \n +/// Bits [63:48] are written to bits [127:112] of the result. +/// \returns A 128-bit vector of [8 x i16] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8 + 0, 1, + 8 + 1, 2, 8 + 2, 3, 8 + 3); +} + +/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of +/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLDQ / PUNPCKLDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. \n +/// Bits [31:0] are written to bits [31:0] of the destination. \n +/// Bits [63:32] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x i32]. \n +/// Bits [31:0] are written to bits [64:32] of the destination. \n +/// Bits [63:32] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x i32] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4 + 0, 1, + 4 + 1); +} + +/// Unpacks the low-order 64-bit elements from two 128-bit vectors of +/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPUNPCKLQDQ / PUNPCKLQDQ +/// instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x i64]. \n +/// Bits [63:0] are written to bits [63:0] of the destination. \n +/// \param __b +/// A 128-bit vector of [2 x i64]. \n +/// Bits [63:0] are written to bits [127:64] of the destination. \n +/// \returns A 128-bit vector of [2 x i64] containing the interleaved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a, + __m128i __b) { + return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2 + 0); +} + +/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit +/// integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVDQ2Q instruction. +/// +/// \param __a +/// A 128-bit integer vector operand. The lower 64 bits are moved to the +/// destination. +/// \returns A 64-bit integer containing the lower 64 bits of the parameter. 
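A sketch of a common use of the unpack/interleave intrinsics documented here: widening unsigned bytes to 16-bit lanes by interleaving with a zero vector (editorial illustration; the input values are arbitrary):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  /* unpacklo produces the widened lanes 0-7, unpackhi lanes 8-15;
     interleaving with zero gives a zero-extended (unsigned) widening. */
  __m128i bytes = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                8, 9, 10, 11, 12, 13, 14, (char)200);
  __m128i zero = _mm_setzero_si128();
  __m128i lo16 = _mm_unpacklo_epi8(bytes, zero);
  __m128i hi16 = _mm_unpackhi_epi8(bytes, zero);
  short lo[8], hi[8];
  _mm_storeu_si128((__m128i_u *)lo, lo16);
  _mm_storeu_si128((__m128i_u *)hi, hi16);
  printf("%d %d ... %d\n", lo[0], lo[1], hi[7]); /* 0 1 ... 200 */
  return 0;
}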
+static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_movepi64_pi64(__m128i __a) { + return (__m64)__a[0]; +} + +/// Moves the 64-bit operand to a 128-bit integer vector, zeroing the +/// upper bits. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVD+VMOVQ instruction. +/// +/// \param __a +/// A 64-bit value. +/// \returns A 128-bit integer vector. The lower 64 bits contain the value from +/// the operand. The upper 64 bits are assigned zeros. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_movpi64_epi64(__m64 __a) { + return __extension__(__m128i)(__v2di){(long long)__a, 0}; +} + +/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit +/// integer vector, zeroing the upper bits. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVQ / MOVQ instruction. +/// +/// \param __a +/// A 128-bit integer vector operand. The lower 64 bits are moved to the +/// destination. +/// \returns A 128-bit integer vector. The lower 64 bits contain the value from +/// the operand. The upper 64 bits are assigned zeros. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_move_epi64(__m128i __a) { + return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2); +} + +/// Unpacks the high-order 64-bit elements from two 128-bit vectors of +/// [2 x double] and interleaves them into a 128-bit vector of [2 x +/// double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPD / UNPCKHPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [127:64] are written to bits [63:0] of the destination. +/// \param __b +/// A 128-bit vector of [2 x double]. \n +/// Bits [127:64] are written to bits [127:64] of the destination. +/// \returns A 128-bit vector of [2 x double] containing the interleaved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a, + __m128d __b) { + return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2 + 1); +} + +/// Unpacks the low-order 64-bit elements from two 128-bit vectors +/// of [2 x double] and interleaves them into a 128-bit vector of [2 x +/// double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. \n +/// Bits [63:0] are written to bits [63:0] of the destination. +/// \param __b +/// A 128-bit vector of [2 x double]. \n +/// Bits [63:0] are written to bits [127:64] of the destination. +/// \returns A 128-bit vector of [2 x double] containing the interleaved values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a, + __m128d __b) { + return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2 + 0); +} + +/// Extracts the sign bits of the double-precision values in the 128-bit +/// vector of [2 x double], zero-extends the value, and writes it to the +/// low-order bits of the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPD / MOVMSKPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the values with sign bits to +/// be extracted. +/// \returns The sign bits from each of the double-precision elements in \a __a, +/// written to bits [1:0]. The remaining bits are assigned values of zero. 
+static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) { + return __builtin_ia32_movmskpd((__v2df)__a); +} + +/// Constructs a 128-bit floating-point vector of [2 x double] from two +/// 128-bit vector parameters of [2 x double], using the immediate-value +/// parameter as a specifier. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_shuffle_pd(__m128d a, __m128d b, const int i); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPD / SHUFPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. +/// \param i +/// An 8-bit immediate value. The least significant two bits specify which +/// elements to copy from \a a and \a b: \n +/// Bit[0] = 0: lower element of \a a copied to lower element of result. \n +/// Bit[0] = 1: upper element of \a a copied to lower element of result. \n +/// Bit[1] = 0: lower element of \a b copied to upper element of result. \n +/// Bit[1] = 1: upper element of \a b copied to upper element of result. \n +/// Note: To generate a mask, you can use the \c _MM_SHUFFLE2 macro. +/// _MM_SHUFFLE2(b1, b0) can create a 2-bit mask of the form +/// [b1, b0]. +/// \returns A 128-bit vector of [2 x double] containing the shuffled values. +#define _mm_shuffle_pd(a, b, i) \ + ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \ + (int)(i))) + +/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit +/// floating-point vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [2 x double]. +/// \returns A 128-bit floating-point vector of [4 x float] containing the same +/// bitwise pattern as the parameter. +static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castpd_ps(__m128d __a) { + return (__m128)__a; +} + +/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [2 x double]. +/// \returns A 128-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castpd_si128(__m128d __a) { + return (__m128i)__a; +} + +/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit +/// floating-point vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 128-bit floating-point vector of [2 x double] containing the same +/// bitwise pattern as the parameter. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castps_pd(__m128 __a) { + return (__m128d)__a; +} + +/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 128-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castps_si128(__m128 __a) { + return (__m128i)__a; +} + +/// Casts a 128-bit integer vector into a 128-bit floating-point vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. 
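Sketch of the cast intrinsics described here, which reinterpret bits without emitting instructions; combined with an integer AND-NOT they give a branch-free two-lane fabs (editorial illustration; the values are assumptions):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d x = _mm_setr_pd(-3.5, 2.0);
  /* Clear the sign bit of each 64-bit lane via the integer domain. */
  __m128i signbits = _mm_set1_epi64x((long long)0x8000000000000000ULL);
  __m128d ax = _mm_castsi128_pd(
      _mm_andnot_si128(signbits, _mm_castpd_si128(x)));
  double out[2];
  _mm_storeu_pd(out, ax);
  printf("%f %f\n", out[0], out[1]); /* 3.5 2.0 */
  return 0;
}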
+/// \returns A 128-bit floating-point vector of [4 x float] containing the same +/// bitwise pattern as the parameter. +static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castsi128_ps(__m128i __a) { + return (__m128)__a; +} + +/// Casts a 128-bit integer vector into a 128-bit floating-point vector +/// of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit integer vector. +/// \returns A 128-bit floating-point vector of [2 x double] containing the same +/// bitwise pattern as the parameter. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) { + return (__m128d)__a; +} + +/// Compares each of the corresponding double-precision values of two +/// 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [2 x double] containing the comparison results. +#define _mm_cmp_pd(a, b, c) \ + ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \ + (c))) + +/// Compares each of the corresponding scalar double-precision values of +/// two 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPSD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [2 x double] containing the comparison results. 
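+///
+/// A minimal usage sketch:
+/// \code
+///   __m128d a = _mm_set_sd(1.0), b = _mm_set_sd(2.0);
+///   __m128d r = _mm_cmp_sd(a, b, 0x01); // low 64 bits all-ones since 1.0 < 2.0
+/// \endcode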
+#define _mm_cmp_sd(a, b, c) \ + ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \ + (c))) + +#if defined(__cplusplus) +extern "C" { +#endif + +/// Indicates that a spin loop is being executed for the purposes of +/// optimizing power consumption during the loop. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PAUSE instruction. +/// +void _mm_pause(void); + +#if defined(__cplusplus) +} // extern "C" +#endif +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_MMX + +#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y)) + +#define _MM_DENORMALS_ZERO_ON (0x0040U) +#define _MM_DENORMALS_ZERO_OFF (0x0000U) + +#define _MM_DENORMALS_ZERO_MASK (0x0040U) + +#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK) +#define _MM_SET_DENORMALS_ZERO_MODE(x) \ + (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x))) + +#endif /* __EMMINTRIN_H */ diff --git a/third_party/intel/clang/enqcmdintrin.h b/third_party/intel/clang/enqcmdintrin.h new file mode 100644 index 000000000..30af67f6b --- /dev/null +++ b/third_party/intel/clang/enqcmdintrin.h @@ -0,0 +1,63 @@ +/*===------------------ enqcmdintrin.h - enqcmd intrinsics -----------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __ENQCMDINTRIN_H +#define __ENQCMDINTRIN_H + +/* Define the default attributes for the functions in this file */ +#define _DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("enqcmd"))) + +/// Reads 64-byte command pointed by \a __src, formats 64-byte enqueue store +/// data, and performs 64-byte enqueue store to memory pointed by \a __dst. +/// This intrinsics may only be used in User mode. +/// +/// \headerfile +/// +/// This intrinsics corresponds to the ENQCMD instruction. +/// +/// \param __dst +/// Pointer to the destination of the enqueue store. +/// \param __src +/// Pointer to 64-byte command data. +/// \returns If the command data is successfully written to \a __dst then 0 is +/// returned. Otherwise 1 is returned. +static __inline__ int _DEFAULT_FN_ATTRS +_enqcmd (void *__dst, const void *__src) +{ + return __builtin_ia32_enqcmd(__dst, __src); +} + +/// Reads 64-byte command pointed by \a __src, formats 64-byte enqueue store +/// data, and performs 64-byte enqueue store to memory pointed by \a __dst +/// This intrinsic may only be used in Privileged mode. +/// +/// \headerfile +/// +/// This intrinsics corresponds to the ENQCMDS instruction. +/// +/// \param __dst +/// Pointer to the destination of the enqueue store. +/// \param __src +/// Pointer to 64-byte command data. +/// \returns If the command data is successfully written to \a __dst then 0 is +/// returned. Otherwise 1 is returned. 
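+///
+/// A minimal usage sketch, assuming \c portal points at a device work-queue
+/// register that the driver has mapped for enqueue stores:
+/// \code
+///   char __attribute__((__aligned__(64))) cmd[64] = {0}; // 64-byte command descriptor
+///   int failed = _enqcmds(portal, cmd);                   // 0 on success, 1 if not accepted
+/// \endcode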
+static __inline__ int _DEFAULT_FN_ATTRS +_enqcmds (void *__dst, const void *__src) +{ + return __builtin_ia32_enqcmds(__dst, __src); +} + +#undef _DEFAULT_FN_ATTRS + +#endif /* __ENQCMDINTRIN_H */ diff --git a/third_party/intel/clang/f16cintrin.h b/third_party/intel/clang/f16cintrin.h new file mode 100644 index 000000000..94a662c1d --- /dev/null +++ b/third_party/intel/clang/f16cintrin.h @@ -0,0 +1,162 @@ +/*===---- f16cintrin.h - F16C intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __F16CINTRIN_H +#define __F16CINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(256))) + +/* NOTE: Intel documents the 128-bit versions of these as being in emmintrin.h, + * but that's because icc can emulate these without f16c using a library call. + * Since we don't do that let's leave these in f16cintrin.h. + */ + +/// Converts a 16-bit half-precision float value into a 32-bit float +/// value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPH2PS instruction. +/// +/// \param __a +/// A 16-bit half-precision float value. +/// \returns The converted 32-bit float value. +static __inline float __DEFAULT_FN_ATTRS128 +_cvtsh_ss(unsigned short __a) +{ + __v8hi __v = {(short)__a, 0, 0, 0, 0, 0, 0, 0}; + __v4sf __r = __builtin_ia32_vcvtph2ps(__v); + return __r[0]; +} + +/// Converts a 32-bit single-precision float value to a 16-bit +/// half-precision float value. +/// +/// \headerfile +/// +/// \code +/// unsigned short _cvtss_sh(float a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VCVTPS2PH instruction. +/// +/// \param a +/// A 32-bit single-precision float value to be converted to a 16-bit +/// half-precision float value. +/// \param imm +/// An immediate value controlling rounding using bits [2:0]: \n +/// 000: Nearest \n +/// 001: Down \n +/// 010: Up \n +/// 011: Truncate \n +/// 1XX: Use MXCSR.RC for rounding +/// \returns The converted 16-bit half-precision float value. +#define _cvtss_sh(a, imm) __extension__ ({ \ + (unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \ + (imm)))[0]); }) + +/// Converts a 128-bit vector containing 32-bit float values into a +/// 128-bit vector containing 16-bit half-precision float values. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_cvtps_ph(__m128 a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VCVTPS2PH instruction. +/// +/// \param a +/// A 128-bit vector containing 32-bit float values. +/// \param imm +/// An immediate value controlling rounding using bits [2:0]: \n +/// 000: Nearest \n +/// 001: Down \n +/// 010: Up \n +/// 011: Truncate \n +/// 1XX: Use MXCSR.RC for rounding +/// \returns A 128-bit vector containing converted 16-bit half-precision float +/// values. The lower 64 bits are used to store the converted 16-bit +/// half-precision floating-point values. 
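+///
+/// A minimal usage sketch:
+/// \code
+///   __m128 f = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
+///   __m128i h = _mm_cvtps_ph(f, 0); // 0 selects round-to-nearest
+///   __m128 g = _mm_cvtph_ps(h);     // recovers the original four floats
+/// \endcode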
+#define _mm_cvtps_ph(a, imm) \ + ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm))) + +/// Converts a 128-bit vector containing 16-bit half-precision float +/// values into a 128-bit vector containing 32-bit float values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPH2PS instruction. +/// +/// \param __a +/// A 128-bit vector containing 16-bit half-precision float values. The lower +/// 64 bits are used in the conversion. +/// \returns A 128-bit vector of [4 x float] containing converted float values. +static __inline __m128 __DEFAULT_FN_ATTRS128 +_mm_cvtph_ps(__m128i __a) +{ + return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a); +} + +/// Converts a 256-bit vector of [8 x float] into a 128-bit vector +/// containing 16-bit half-precision float values. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm256_cvtps_ph(__m256 a, const int imm); +/// \endcode +/// +/// This intrinsic corresponds to the VCVTPS2PH instruction. +/// +/// \param a +/// A 256-bit vector containing 32-bit single-precision float values to be +/// converted to 16-bit half-precision float values. +/// \param imm +/// An immediate value controlling rounding using bits [2:0]: \n +/// 000: Nearest \n +/// 001: Down \n +/// 010: Up \n +/// 011: Truncate \n +/// 1XX: Use MXCSR.RC for rounding +/// \returns A 128-bit vector containing the converted 16-bit half-precision +/// float values. +#define _mm256_cvtps_ph(a, imm) \ + ((__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm))) + +/// Converts a 128-bit vector containing 16-bit half-precision float +/// values into a 256-bit vector of [8 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTPH2PS instruction. +/// +/// \param __a +/// A 128-bit vector containing 16-bit half-precision float values to be +/// converted to 32-bit single-precision float values. +/// \returns A vector of [8 x float] containing the converted 32-bit +/// single-precision float values. +static __inline __m256 __DEFAULT_FN_ATTRS256 +_mm256_cvtph_ps(__m128i __a) +{ + return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __F16CINTRIN_H */ diff --git a/third_party/intel/clang/fma4intrin.h b/third_party/intel/clang/fma4intrin.h new file mode 100644 index 000000000..7ff69d96d --- /dev/null +++ b/third_party/intel/clang/fma4intrin.h @@ -0,0 +1,218 @@ +/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86INTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __FMA4INTRIN_H +#define __FMA4INTRIN_H + +#include "pmmintrin.h" + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma4"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma4"), __min_vector_width__(256))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 
+_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __FMA4INTRIN_H */ diff --git a/third_party/intel/clang/fmaintrin.h b/third_party/intel/clang/fmaintrin.h new file mode 100644 index 000000000..22d1a780b --- /dev/null +++ b/third_party/intel/clang/fmaintrin.h @@ -0,0 +1,796 @@ +/*===---- fmaintrin.h - FMA intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __FMAINTRIN_H +#define __FMAINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256))) + +/// Computes a multiply-add of 128-bit vectors of [4 x float]. +/// For each element, computes (__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +/// Computes a multiply-add of 128-bit vectors of [2 x double]. +/// For each element, computes (__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend. +/// \returns A 128-bit [2 x double] vector containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +/// Computes a scalar multiply-add of the single-precision values in the +/// low 32 bits of 128-bit vectors of [4 x float]. +/// +/// \code{.operation} +/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] +/// result[127:32] = __A[127:32] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213SS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand in the low +/// 32 bits. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier in the low +/// 32 bits. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend in the low +/// 32 bits. +/// \returns A 128-bit vector of [4 x float] containing the result in the low +/// 32 bits and a copy of \a __A[127:32] in the upper 96 bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +/// Computes a scalar multiply-add of the double-precision values in the +/// low 64 bits of 128-bit vectors of [2 x double]. +/// +/// \code{.operation} +/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] +/// result[127:64] = __A[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213SD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand in the low +/// 64 bits. 
+/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier in the low +/// 64 bits. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend in the low +/// 64 bits. +/// \returns A 128-bit vector of [2 x double] containing the result in the low +/// 64 bits and a copy of \a __A[127:64] in the upper 64 bits. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +/// Computes a multiply-subtract of 128-bit vectors of [4 x float]. +/// For each element, computes (__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a multiply-subtract of 128-bit vectors of [2 x double]. +/// For each element, computes (__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/// Computes a scalar multiply-subtract of the single-precision values in +/// the low 32 bits of 128-bit vectors of [4 x float]. +/// +/// \code{.operation} +/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] +/// result[127:32] = __A[127:32] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213SS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand in the low +/// 32 bits. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier in the low +/// 32 bits. +/// \param __C +/// A 128-bit vector of [4 x float] containing the subtrahend in the low +/// 32 bits. +/// \returns A 128-bit vector of [4 x float] containing the result in the low +/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a scalar multiply-subtract of the double-precision values in +/// the low 64 bits of 128-bit vectors of [2 x double]. +/// +/// \code{.operation} +/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] +/// result[127:64] = __A[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213SD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand in the low +/// 64 bits. 
+/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier in the low +/// 64 bits. +/// \param __C +/// A 128-bit vector of [2 x double] containing the subtrahend in the low +/// 64 bits. +/// \returns A 128-bit vector of [2 x double] containing the result in the low +/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/// Computes a negated multiply-add of 128-bit vectors of [4 x float]. +/// For each element, computes -(__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213DPS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend. +/// \returns A 128-bit [4 x float] vector containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +/// Computes a negated multiply-add of 128-bit vectors of [2 x double]. +/// For each element, computes -(__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +/// Computes a scalar negated multiply-add of the single-precision values in +/// the low 32 bits of 128-bit vectors of [4 x float]. +/// +/// \code{.operation} +/// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0] +/// result[127:32] = __A[127:32] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213SS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand in the low +/// 32 bits. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier in the low +/// 32 bits. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend in the low +/// 32 bits. +/// \returns A 128-bit vector of [4 x float] containing the result in the low +/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C); +} + +/// Computes a scalar negated multiply-add of the double-precision values +/// in the low 64 bits of 128-bit vectors of [2 x double]. +/// +/// \code{.operation} +/// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0] +/// result[127:64] = __A[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213SD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand in the low +/// 64 bits. 
+/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier in the low +/// 64 bits. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend in the low +/// 64 bits. +/// \returns A 128-bit vector of [2 x double] containing the result in the low +/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C); +} + +/// Computes a negated multiply-subtract of 128-bit vectors of [4 x float]. +/// For each element, computes -(__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a negated multiply-subtract of 128-bit vectors of [2 x double]. +/// For each element, computes -(__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the subtrahend. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/// Computes a scalar negated multiply-subtract of the single-precision +/// values in the low 32 bits of 128-bit vectors of [4 x float]. +/// +/// \code{.operation} +/// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0] +/// result[127:32] = __A[127:32] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213SS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand in the low +/// 32 bits. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier in the low +/// 32 bits. +/// \param __C +/// A 128-bit vector of [4 x float] containing the subtrahend in the low +/// 32 bits. +/// \returns A 128-bit vector of [4 x float] containing the result in the low +/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a scalar negated multiply-subtract of the double-precision +/// values in the low 64 bits of 128-bit vectors of [2 x double]. +/// +/// \code{.operation} +/// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0] +/// result[127:64] = __A[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213SD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand in the low +/// 64 bits. 
+/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier in the low +/// 64 bits. +/// \param __C +/// A 128-bit vector of [2 x double] containing the subtrahend in the low +/// 64 bits. +/// \returns A 128-bit vector of [2 x double] containing the result in the low +/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C); +} + +/// Computes a multiply with alternating add/subtract of 128-bit vectors of +/// [4 x float]. +/// +/// \code{.operation} +/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] +/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32] +/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64] +/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend/subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C); +} + +/// Computes a multiply with alternating add/subtract of 128-bit vectors of +/// [2 x double]. +/// +/// \code{.operation} +/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] +/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend/subtrahend. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C); +} + +/// Computes a multiply with alternating add/subtract of 128-bit vectors of +/// [4 x float]. +/// +/// \code{.operation} +/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] +/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32] +/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64] +/// result[127:96 = (__A[127:96] * __B[127:96]) - __C[127:96] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x float] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [4 x float] containing the multiplier. +/// \param __C +/// A 128-bit vector of [4 x float] containing the addend/subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the result. +static __inline__ __m128 __DEFAULT_FN_ATTRS128 +_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C); +} + +/// Computes a multiply with alternating add/subtract of 128-bit vectors of +/// [2 x double]. 
+/// +/// \code{.operation} +/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] +/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction. +/// +/// \param __A +/// A 128-bit vector of [2 x double] containing the multiplicand. +/// \param __B +/// A 128-bit vector of [2 x double] containing the multiplier. +/// \param __C +/// A 128-bit vector of [2 x double] containing the addend/subtrahend. +/// \returns A 128-bit vector of [2 x double] containing the result. +static __inline__ __m128d __DEFAULT_FN_ATTRS128 +_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C); +} + +/// Computes a multiply-add of 256-bit vectors of [8 x float]. +/// For each element, computes (__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the addend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +/// Computes a multiply-add of 256-bit vectors of [4 x double]. +/// For each element, computes (__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADD213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the addend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +/// Computes a multiply-subtract of 256-bit vectors of [8 x float]. +/// For each element, computes (__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +/// Computes a multiply-subtract of 256-bit vectors of [4 x double]. +/// For each element, computes (__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUB213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the result. 
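+///
+/// A minimal usage sketch:
+/// \code
+///   __m256d a = _mm256_set1_pd(2.0), b = _mm256_set1_pd(3.0), c = _mm256_set1_pd(1.0);
+///   __m256d r = _mm256_fmsub_pd(a, b, c); // every lane holds 2.0*3.0 - 1.0 == 5.0
+/// \endcode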
+static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +/// Computes a negated multiply-add of 256-bit vectors of [8 x float]. +/// For each element, computes -(__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the addend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +/// Computes a negated multiply-add of 256-bit vectors of [4 x double]. +/// For each element, computes -(__A * __B) + __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMADD213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the addend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +/// Computes a negated multiply-subtract of 256-bit vectors of [8 x float]. +/// For each element, computes -(__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +/// Computes a negated multiply-subtract of 256-bit vectors of [4 x double]. +/// For each element, computes -(__A * __B) - __C . +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFNMSUB213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +/// Computes a multiply with alternating add/subtract of 256-bit vectors of +/// [8 x float]. 
+/// +/// \code{.operation} +/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] +/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32] +/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64] +/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96] +/// result[159:128] = (__A[159:128] * __B[159:128]) - __C[159:128] +/// result[191:160] = (__A[191:160] * __B[191:160]) + __C[191:160] +/// result[223:192] = (__A[223:192] * __B[223:192]) - __C[223:192] +/// result[255:224] = (__A[255:224] * __B[255:224]) + __C[255:224] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the addend/subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the result. +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C); +} + +/// Computes a multiply with alternating add/subtract of 256-bit vectors of +/// [4 x double]. +/// +/// \code{.operation} +/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] +/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64] +/// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128] +/// result[255:192] = (__A[255:192] * __B[255:192]) + __C[255:192] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the addend/subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C); +} + +/// Computes a vector multiply with alternating add/subtract of 256-bit +/// vectors of [8 x float]. +/// +/// \code{.operation} +/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] +/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32] +/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64] +/// result[127:96] = (__A[127:96] * __B[127:96]) - __C[127:96] +/// result[159:128] = (__A[159:128] * __B[159:128]) + __C[159:128] +/// result[191:160] = (__A[191:160] * __B[191:160]) - __C[191:160] +/// result[223:192] = (__A[223:192] * __B[223:192]) + __C[223:192] +/// result[255:224] = (__A[255:224] * __B[255:224]) - __C[255:224] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x float] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [8 x float] containing the multiplier. +/// \param __C +/// A 256-bit vector of [8 x float] containing the addend/subtrahend. +/// \returns A 256-bit vector of [8 x float] containing the result. 
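+///
+/// A minimal usage sketch:
+/// \code
+///   __m256 a = _mm256_set1_ps(2.0f), b = _mm256_set1_ps(3.0f), c = _mm256_set1_ps(1.0f);
+///   __m256 r = _mm256_fmsubadd_ps(a, b, c); // even lanes 2*3+1 == 7, odd lanes 2*3-1 == 5
+/// \endcode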
+static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C); +} + +/// Computes a vector multiply with alternating add/subtract of 256-bit +/// vectors of [4 x double]. +/// +/// \code{.operation} +/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] +/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64] +/// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128] +/// result[255:192] = (__A[255:192] * __B[255:192]) - __C[255:192] +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VFMSUBADD213PD instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x double] containing the multiplicand. +/// \param __B +/// A 256-bit vector of [4 x double] containing the multiplier. +/// \param __C +/// A 256-bit vector of [4 x double] containing the addend/subtrahend. +/// \returns A 256-bit vector of [4 x double] containing the result. +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C); +} + +#undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __FMAINTRIN_H */ diff --git a/third_party/intel/clang/fxsrintrin.h b/third_party/intel/clang/fxsrintrin.h new file mode 100644 index 000000000..afee6aa97 --- /dev/null +++ b/third_party/intel/clang/fxsrintrin.h @@ -0,0 +1,91 @@ +/*===---- fxsrintrin.h - FXSR intrinsic ------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __FXSRINTRIN_H +#define __FXSRINTRIN_H + +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fxsr"))) + +/// Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte +/// memory region pointed to by the input parameter \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the FXSAVE instruction. +/// +/// \param __p +/// A pointer to a 512-byte memory region. The beginning of this memory +/// region should be aligned on a 16-byte boundary. +static __inline__ void __DEFAULT_FN_ATTRS +_fxsave(void *__p) +{ + __builtin_ia32_fxsave(__p); +} + +/// Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte +/// memory region pointed to by the input parameter \a __p. The contents of +/// this memory region should have been written to by a previous \c _fxsave +/// or \c _fxsave64 intrinsic. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the FXRSTOR instruction. +/// +/// \param __p +/// A pointer to a 512-byte memory region. The beginning of this memory +/// region should be aligned on a 16-byte boundary. +static __inline__ void __DEFAULT_FN_ATTRS +_fxrstor(void *__p) +{ + __builtin_ia32_fxrstor(__p); +} + +#ifdef __x86_64__ +/// Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte +/// memory region pointed to by the input parameter \a __p. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the FXSAVE64 instruction. +/// +/// \param __p +/// A pointer to a 512-byte memory region. 
The beginning of this memory +/// region should be aligned on a 16-byte boundary. +static __inline__ void __DEFAULT_FN_ATTRS +_fxsave64(void *__p) +{ + __builtin_ia32_fxsave64(__p); +} + +/// Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte +/// memory region pointed to by the input parameter \a __p. The contents of +/// this memory region should have been written to by a previous \c _fxsave +/// or \c _fxsave64 intrinsic. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the FXRSTOR64 instruction. +/// +/// \param __p +/// A pointer to a 512-byte memory region. The beginning of this memory +/// region should be aligned on a 16-byte boundary. +static __inline__ void __DEFAULT_FN_ATTRS +_fxrstor64(void *__p) +{ + __builtin_ia32_fxrstor64(__p); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/gfniintrin.h b/third_party/intel/clang/gfniintrin.h new file mode 100644 index 000000000..73b04a824 --- /dev/null +++ b/third_party/intel/clang/gfniintrin.h @@ -0,0 +1,211 @@ +/*===----------------- gfniintrin.h - GFNI intrinsics ----------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __GFNIINTRIN_H +#define __GFNIINTRIN_H + +/* Default attributes for simple form (no masking). */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("gfni,no-evex512"), __min_vector_width__(128))) + +/* Default attributes for YMM unmasked form. */ +#define __DEFAULT_FN_ATTRS_Y \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx,gfni,no-evex512"), \ + __min_vector_width__(256))) + +/* Default attributes for ZMM unmasked forms. */ +#define __DEFAULT_FN_ATTRS_Z \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512f,evex512,gfni"), \ + __min_vector_width__(512))) +/* Default attributes for ZMM masked forms. */ +#define __DEFAULT_FN_ATTRS_Z_MASK \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512bw,evex512,gfni"), \ + __min_vector_width__(512))) + +/* Default attributes for VLX masked forms. 
*/ +#define __DEFAULT_FN_ATTRS_VL128 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512bw,avx512vl,gfni,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS_VL256 \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512bw,avx512vl,gfni,no-evex512"), \ + __min_vector_width__(256))) + +#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \ + ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), \ + (char)(I))) + +#define _mm_gf2p8affine_epi64_epi8(A, B, I) \ + ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), \ + (char)(I))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_gf2p8mul_epi8(__m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A, + (__v16qi) __B); +} + +#ifdef __AVXINTRIN_H +#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \ + ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), \ + (char)(I))) + +#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \ + ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \ + (__v32qi)(__m256i)(B), \ + (char)(I))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS_Y +_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A, + (__v32qi) __B); +} +#endif /* __AVXINTRIN_H */ + +#ifdef __AVX512BWINTRIN_H +#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \ + ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), \ + (char)(I))) + +#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \ + ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \ + (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \ + (__v64qi)(__m512i)(S))) + +#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \ + _mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \ + U, A, B, I) + +#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \ + ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \ + (__v64qi)(__m512i)(B), \ + (char)(I))) + +#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \ + ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \ + (__v64qi)_mm512_gf2p8affine_epi64_epi8((A), (B), (I)), \ + (__v64qi)(__m512i)(S))) + +#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \ + _mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \ + U, A, B, I) + +static __inline__ __m512i __DEFAULT_FN_ATTRS_Z +_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A, + (__v64qi) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK +_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_selectb_512(__U, + (__v64qi) _mm512_gf2p8mul_epi8(__A, __B), + (__v64qi) __S); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK +_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B) +{ + return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(), + __U, __A, __B); +} +#endif /* __AVX512BWINTRIN_H */ + +#ifdef __AVX512VLBWINTRIN_H +#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \ + ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \ + (__v16qi)(__m128i)(S))) + +#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \ + 
_mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \ + U, A, B, I) + +#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \ + ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \ + (__v32qi)(__m256i)(S))) + +#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \ + _mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \ + U, A, B, I) + +#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \ + ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \ + (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \ + (__v16qi)(__m128i)(S))) + +#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \ + _mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), U, A, B, I) + +#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \ + ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \ + (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \ + (__v32qi)(__m256i)(S))) + +#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \ + _mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \ + U, A, B, I) + +static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128 +_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_selectb_128(__U, + (__v16qi) _mm_gf2p8mul_epi8(__A, __B), + (__v16qi) __S); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128 +_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B) +{ + return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(), + __U, __A, __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256 +_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_selectb_256(__U, + (__v32qi) _mm256_gf2p8mul_epi8(__A, __B), + (__v32qi) __S); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256 +_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B) +{ + return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(), + __U, __A, __B); +} +#endif /* __AVX512VLBWINTRIN_H */ + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_Y +#undef __DEFAULT_FN_ATTRS_Z +#undef __DEFAULT_FN_ATTRS_VL128 +#undef __DEFAULT_FN_ATTRS_VL256 + +#endif /* __GFNIINTRIN_H */ + diff --git a/third_party/intel/clang/hresetintrin.h b/third_party/intel/clang/hresetintrin.h new file mode 100644 index 000000000..646f6c130 --- /dev/null +++ b/third_party/intel/clang/hresetintrin.h @@ -0,0 +1,49 @@ +/*===---------------- hresetintrin.h - HRESET intrinsics -------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __X86GPRINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __HRESETINTRIN_H +#define __HRESETINTRIN_H + +#if __has_extension(gnu_asm) + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("hreset"))) + +/// Provides a hint to the processor to selectively reset the prediction +/// history of the current logical processor specified by a 32-bit integer +/// value \a __eax. +/// +/// This intrinsic corresponds to the HRESET instruction. 
+/// +/// \code{.operation} +/// IF __eax == 0 +/// // nop +/// ELSE +/// FOR i := 0 to 31 +/// IF __eax[i] +/// ResetPredictionFeature(i) +/// FI +/// ENDFOR +/// FI +/// \endcode +static __inline void __DEFAULT_FN_ATTRS +_hreset(int __eax) +{ + __asm__ ("hreset $0" :: "a"(__eax)); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __has_extension(gnu_asm) */ + +#endif /* __HRESETINTRIN_H */ diff --git a/third_party/intel/clang/ia32intrin.h b/third_party/intel/clang/ia32intrin.h new file mode 100644 index 000000000..8e65f232a --- /dev/null +++ b/third_party/intel/clang/ia32intrin.h @@ -0,0 +1,863 @@ +/* ===-------- ia32intrin.h ---------------------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86INTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __IA32INTRIN_H +#define __IA32INTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#define __DEFAULT_FN_ATTRS_CRC32 __attribute__((__always_inline__, __nodebug__, __target__("crc32"))) + +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__)) constexpr +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr +#else +#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__)) +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS +#endif + +/// Finds the first set bit starting from the least significant bit. The result +/// is undefined if the input is 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BSF instruction or the +/// \c TZCNT instruction. +/// +/// \param __A +/// A 32-bit integer operand. +/// \returns A 32-bit integer containing the bit number. +/// \see _bit_scan_forward +static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR +__bsfd(int __A) { + return __builtin_ctz((unsigned int)__A); +} + +/// Finds the first set bit starting from the most significant bit. The result +/// is undefined if the input is 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BSR instruction or the +/// \c LZCNT instruction and an \c XOR. +/// +/// \param __A +/// A 32-bit integer operand. +/// \returns A 32-bit integer containing the bit number. +/// \see _bit_scan_reverse +static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR +__bsrd(int __A) { + return 31 - __builtin_clz((unsigned int)__A); +} + +/// Swaps the bytes in the input, converting little endian to big endian or +/// vice versa. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BSWAP instruction. +/// +/// \param __A +/// A 32-bit integer operand. +/// \returns A 32-bit integer containing the swapped bytes. +static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR +__bswapd(int __A) { + return (int)__builtin_bswap32((unsigned int)__A); +} + +/// Swaps the bytes in the input, converting little endian to big endian or +/// vice versa. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BSWAP instruction. +/// +/// \param __A +/// A 32-bit integer operand. +/// \returns A 32-bit integer containing the swapped bytes. 
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR +_bswap(int __A) { + return (int)__builtin_bswap32((unsigned int)__A); +} + +/// Finds the first set bit starting from the least significant bit. The result +/// is undefined if the input is 0. +/// +/// \headerfile +/// +/// \code +/// int _bit_scan_forward(int A); +/// \endcode +/// +/// This intrinsic corresponds to the \c BSF instruction or the +/// \c TZCNT instruction. +/// +/// \param A +/// A 32-bit integer operand. +/// \returns A 32-bit integer containing the bit number. +/// \see __bsfd +#define _bit_scan_forward(A) __bsfd((A)) + +/// Finds the first set bit starting from the most significant bit. The result +/// is undefined if the input is 0. +/// +/// \headerfile +/// +/// \code +/// int _bit_scan_reverse(int A); +/// \endcode +/// +/// This intrinsic corresponds to the \c BSR instruction or the +/// \c LZCNT instruction and an \c XOR. +/// +/// \param A +/// A 32-bit integer operand. +/// \returns A 32-bit integer containing the bit number. +/// \see __bsrd +#define _bit_scan_reverse(A) __bsrd((A)) + +#ifdef __x86_64__ +/// Finds the first set bit starting from the least significant bit. The result +/// is undefined if the input is 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BSF instruction or the +/// \c TZCNT instruction. +/// +/// \param __A +/// A 64-bit integer operand. +/// \returns A 32-bit integer containing the bit number. +static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR +__bsfq(long long __A) { + return (long long)__builtin_ctzll((unsigned long long)__A); +} + +/// Finds the first set bit starting from the most significant bit. The result +/// is undefined if input is 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BSR instruction or the +/// \c LZCNT instruction and an \c XOR. +/// +/// \param __A +/// A 64-bit integer operand. +/// \returns A 32-bit integer containing the bit number. +static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR +__bsrq(long long __A) { + return 63 - __builtin_clzll((unsigned long long)__A); +} + +/// Swaps the bytes in the input, converting little endian to big endian or +/// vice versa. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c BSWAP instruction. +/// +/// \param __A +/// A 64-bit integer operand. +/// \returns A 64-bit integer containing the swapped bytes. +/// \see _bswap64 +static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR +__bswapq(long long __A) { + return (long long)__builtin_bswap64((unsigned long long)__A); +} + +/// Swaps the bytes in the input, converting little endian to big endian or +/// vice versa. +/// +/// \headerfile +/// +/// \code +/// long long _bswap64(long long A); +/// \endcode +/// +/// This intrinsic corresponds to the \c BSWAP instruction. +/// +/// \param A +/// A 64-bit integer operand. +/// \returns A 64-bit integer containing the swapped bytes. +/// \see __bswapq +#define _bswap64(A) __bswapq((A)) +#endif /* __x86_64__ */ + +/// Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c POPCNT instruction or a +/// sequence of arithmetic and logic operations to calculate it. +/// +/// \param __A +/// An unsigned 32-bit integer operand. +/// \returns A 32-bit integer containing the number of bits with value 1 in the +/// source operand. 
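/*
 * Illustrative usage sketch for the bit-scan and byte-swap intrinsics above
 * (not part of the upstream header).  ia32intrin.h is reached through the
 * <x86intrin.h> umbrella header per the guard at the top of this file; the
 * 64-bit variants assume an x86-64 target.
 */
#include <stdio.h>
#include <x86intrin.h>

int main(void) {
  unsigned long long mask = 0x0008000000000200ULL;

  /* The result of __bsfq/__bsrq is undefined for a zero input, so callers
     are expected to test the operand first. */
  if (mask != 0) {
    printf("lowest set bit:  %d\n", __bsfq((long long)mask));   /* 9  */
    printf("highest set bit: %d\n", __bsrq((long long)mask));   /* 51 */
  }

  /* _bswap64 converts between little- and big-endian representations. */
  printf("byte-swapped: %#llx\n",
         (unsigned long long)_bswap64(0x1122334455667788LL));
  return 0;
}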
+/// \see _popcnt32 +static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR +__popcntd(unsigned int __A) +{ + return __builtin_popcount(__A); +} + +/// Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// \code +/// int _popcnt32(int A); +/// \endcode +/// +/// This intrinsic corresponds to the \c POPCNT instruction or a +/// sequence of arithmetic and logic operations to calculate it. +/// +/// \param A +/// An unsigned 32-bit integer operand. +/// \returns A 32-bit integer containing the number of bits with value 1 in the +/// source operand. +/// \see __popcntd +#define _popcnt32(A) __popcntd((A)) + +#ifdef __x86_64__ +/// Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c POPCNT instruction or a +/// sequence of arithmetic and logic operations to calculate it. +/// +/// \param __A +/// An unsigned 64-bit integer operand. +/// \returns A 64-bit integer containing the number of bits with value 1 in the +/// source operand. +/// \see _popcnt64 +static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR +__popcntq(unsigned long long __A) +{ + return __builtin_popcountll(__A); +} + +/// Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// \code +/// long long _popcnt64(unsigned long long A); +/// \endcode +/// +/// This intrinsic corresponds to the \c POPCNT instruction or a +/// sequence of arithmetic and logic operations to calculate it. +/// +/// \param A +/// An unsigned 64-bit integer operand. +/// \returns A 64-bit integer containing the number of bits with value 1 in the +/// source operand. +/// \see __popcntq +#define _popcnt64(A) __popcntq((A)) +#endif /* __x86_64__ */ + +#ifdef __x86_64__ +/// Returns the program status-and-control \c RFLAGS register with the \c VM +/// and \c RF flags cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PUSHFQ + \c POP instruction sequence. +/// +/// \returns The 64-bit value of the RFLAGS register. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__readeflags(void) +{ + return __builtin_ia32_readeflags_u64(); +} + +/// Writes the specified value to the program status-and-control \c RFLAGS +/// register. Reserved bits are not affected. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PUSH + \c POPFQ instruction sequence. +/// +/// \param __f +/// The 64-bit value to write to \c RFLAGS. +static __inline__ void __DEFAULT_FN_ATTRS +__writeeflags(unsigned long long __f) +{ + __builtin_ia32_writeeflags_u64(__f); +} + +#else /* !__x86_64__ */ +/// Returns the program status-and-control \c EFLAGS register with the \c VM +/// and \c RF flags cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PUSHFD + \c POP instruction sequence. +/// +/// \returns The 32-bit value of the EFLAGS register. +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__readeflags(void) +{ + return __builtin_ia32_readeflags_u32(); +} + +/// Writes the specified value to the program status-and-control \c EFLAGS +/// register. Reserved bits are not affected. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PUSH + \c POPFD instruction sequence. +/// +/// \param __f +/// The 32-bit value to write to \c EFLAGS. 
+static __inline__ void __DEFAULT_FN_ATTRS +__writeeflags(unsigned int __f) +{ + __builtin_ia32_writeeflags_u32(__f); +} +#endif /* !__x86_64__ */ + +/// Casts a 32-bit float value to a 32-bit unsigned integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VMOVD / \c MOVD instruction in x86_64, +/// and corresponds to the \c VMOVL / \c MOVL instruction in ia32. +/// +/// \param __A +/// A 32-bit float value. +/// \returns A 32-bit unsigned integer containing the converted value. +static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST +_castf32_u32(float __A) { + return __builtin_bit_cast(unsigned int, __A); +} + +/// Casts a 64-bit float value to a 64-bit unsigned integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VMOVQ / \c MOVQ instruction in x86_64, +/// and corresponds to the \c VMOVL / \c MOVL instruction in ia32. +/// +/// \param __A +/// A 64-bit float value. +/// \returns A 64-bit unsigned integer containing the converted value. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST +_castf64_u64(double __A) { + return __builtin_bit_cast(unsigned long long, __A); +} + +/// Casts a 32-bit unsigned integer value to a 32-bit float value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VMOVQ / \c MOVQ instruction in x86_64, +/// and corresponds to the \c FLDS instruction in ia32. +/// +/// \param __A +/// A 32-bit unsigned integer value. +/// \returns A 32-bit float value containing the converted value. +static __inline__ float __DEFAULT_FN_ATTRS_CAST +_castu32_f32(unsigned int __A) { + return __builtin_bit_cast(float, __A); +} + +/// Casts a 64-bit unsigned integer value to a 64-bit float value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VMOVQ / \c MOVQ instruction in x86_64, +/// and corresponds to the \c FLDL instruction in ia32. +/// +/// \param __A +/// A 64-bit unsigned integer value. +/// \returns A 64-bit float value containing the converted value. +static __inline__ double __DEFAULT_FN_ATTRS_CAST +_castu64_f64(unsigned long long __A) { + return __builtin_bit_cast(double, __A); +} + +/// Adds the unsigned integer operand to the CRC-32C checksum of the +/// unsigned char operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c CRC32B instruction. +/// +/// \param __C +/// An unsigned integer operand to add to the CRC-32C checksum of operand +/// \a __D. +/// \param __D +/// An unsigned 8-bit integer operand used to compute the CRC-32C checksum. +/// \returns The result of adding operand \a __C to the CRC-32C checksum of +/// operand \a __D. +static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32 +__crc32b(unsigned int __C, unsigned char __D) +{ + return __builtin_ia32_crc32qi(__C, __D); +} + +/// Adds the unsigned integer operand to the CRC-32C checksum of the +/// unsigned short operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c CRC32W instruction. +/// +/// \param __C +/// An unsigned integer operand to add to the CRC-32C checksum of operand +/// \a __D. +/// \param __D +/// An unsigned 16-bit integer operand used to compute the CRC-32C checksum. +/// \returns The result of adding operand \a __C to the CRC-32C checksum of +/// operand \a __D. +static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32 +__crc32w(unsigned int __C, unsigned short __D) +{ + return __builtin_ia32_crc32hi(__C, __D); +} + +/// Adds the unsigned integer operand to the CRC-32C checksum of the +/// second unsigned integer operand. 
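/*
 * Illustrative usage sketch for the CRC-32C intrinsics above (not part of
 * the upstream header).  Assumes CRC32 support is enabled at compile time,
 * e.g. -mcrc32 or -msse4.2, and the <x86intrin.h> umbrella include.
 */
#include <stddef.h>
#include <x86intrin.h>

/* Fold a byte buffer into a running CRC-32C checksum one byte at a time;
   wider __crc32w/__crc32d/__crc32q steps can be used for aligned chunks. */
static unsigned int crc32c_bytes(unsigned int crc, const unsigned char *p,
                                 size_t n) {
  for (size_t i = 0; i < n; i++)
    crc = __crc32b(crc, p[i]);
  return crc;
}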
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c CRC32D instruction. +/// +/// \param __C +/// An unsigned integer operand to add to the CRC-32C checksum of operand +/// \a __D. +/// \param __D +/// An unsigned 32-bit integer operand used to compute the CRC-32C checksum. +/// \returns The result of adding operand \a __C to the CRC-32C checksum of +/// operand \a __D. +static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32 +__crc32d(unsigned int __C, unsigned int __D) +{ + return __builtin_ia32_crc32si(__C, __D); +} + +#ifdef __x86_64__ +/// Adds the unsigned integer operand to the CRC-32C checksum of the +/// unsigned 64-bit integer operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c CRC32Q instruction. +/// +/// \param __C +/// An unsigned integer operand to add to the CRC-32C checksum of operand +/// \a __D. +/// \param __D +/// An unsigned 64-bit integer operand used to compute the CRC-32C checksum. +/// \returns The result of adding operand \a __C to the CRC-32C checksum of +/// operand \a __D. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CRC32 +__crc32q(unsigned long long __C, unsigned long long __D) +{ + return __builtin_ia32_crc32di(__C, __D); +} +#endif /* __x86_64__ */ + +/// Reads the specified performance-monitoring counter. Refer to your +/// processor's documentation to determine which performance counters are +/// supported. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c RDPMC instruction. +/// +/// \param __A +/// The performance counter to read. +/// \returns The 64-bit value read from the performance counter. +/// \see _rdpmc +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__rdpmc(int __A) { + return __builtin_ia32_rdpmc(__A); +} + +/// Reads the processor's time-stamp counter and the \c IA32_TSC_AUX MSR +/// \c (0xc0000103). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c RDTSCP instruction. +/// +/// \param __A +/// The address of where to store the 32-bit \c IA32_TSC_AUX value. +/// \returns The 64-bit value of the time-stamp counter. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__rdtscp(unsigned int *__A) { + return __builtin_ia32_rdtscp(__A); +} + +/// Reads the processor's time-stamp counter. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _rdtsc(); +/// \endcode +/// +/// This intrinsic corresponds to the \c RDTSC instruction. +/// +/// \returns The 64-bit value of the time-stamp counter. +#define _rdtsc() __rdtsc() + +/// Reads the specified performance monitoring counter. Refer to your +/// processor's documentation to determine which performance counters are +/// supported. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _rdpmc(int A); +/// \endcode +/// +/// This intrinsic corresponds to the \c RDPMC instruction. +/// +/// \param A +/// The performance counter to read. +/// \returns The 64-bit value read from the performance counter. +/// \see __rdpmc +#define _rdpmc(A) __rdpmc(A) + +static __inline__ void __DEFAULT_FN_ATTRS +_wbinvd(void) { + __builtin_ia32_wbinvd(); +} + +/// Rotates an 8-bit value to the left by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ROL instruction. +/// +/// \param __X +/// The unsigned 8-bit value to be rotated. +/// \param __C +/// The number of bits to rotate the value. +/// \returns The rotated value. 
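/*
 * Illustrative sketch of timing a region with the time-stamp counter
 * intrinsics above (not part of the upstream header).  Raw cycle counts are
 * only meaningful when the work stays on one core with an invariant TSC.
 */
#include <x86intrin.h>

static unsigned long long time_region(void (*fn)(void)) {
  unsigned int aux;                        /* receives IA32_TSC_AUX        */
  unsigned long long start = __rdtscp(&aux);
  fn();                                    /* region being measured        */
  unsigned long long end = __rdtscp(&aux);
  return end - start;                      /* elapsed reference cycles     */
}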
+static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR +__rolb(unsigned char __X, int __C) { + return __builtin_rotateleft8(__X, __C); +} + +/// Rotates an 8-bit value to the right by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ROR instruction. +/// +/// \param __X +/// The unsigned 8-bit value to be rotated. +/// \param __C +/// The number of bits to rotate the value. +/// \returns The rotated value. +static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR +__rorb(unsigned char __X, int __C) { + return __builtin_rotateright8(__X, __C); +} + +/// Rotates a 16-bit value to the left by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ROL instruction. +/// +/// \param __X +/// The unsigned 16-bit value to be rotated. +/// \param __C +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see _rotwl +static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR +__rolw(unsigned short __X, int __C) { + return __builtin_rotateleft16(__X, __C); +} + +/// Rotates a 16-bit value to the right by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ROR instruction. +/// +/// \param __X +/// The unsigned 16-bit value to be rotated. +/// \param __C +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see _rotwr +static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR +__rorw(unsigned short __X, int __C) { + return __builtin_rotateright16(__X, __C); +} + +/// Rotates a 32-bit value to the left by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ROL instruction. +/// +/// \param __X +/// The unsigned 32-bit value to be rotated. +/// \param __C +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see _rotl +static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR +__rold(unsigned int __X, int __C) { + return __builtin_rotateleft32(__X, (unsigned int)__C); +} + +/// Rotates a 32-bit value to the right by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ROR instruction. +/// +/// \param __X +/// The unsigned 32-bit value to be rotated. +/// \param __C +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see _rotr +static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR +__rord(unsigned int __X, int __C) { + return __builtin_rotateright32(__X, (unsigned int)__C); +} + +#ifdef __x86_64__ +/// Rotates a 64-bit value to the left by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ROL instruction. +/// +/// \param __X +/// The unsigned 64-bit value to be rotated. +/// \param __C +/// The number of bits to rotate the value. +/// \returns The rotated value. 
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR +__rolq(unsigned long long __X, int __C) { + return __builtin_rotateleft64(__X, (unsigned long long)__C); +} + +/// Rotates a 64-bit value to the right by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c ROR instruction. +/// +/// \param __X +/// The unsigned 64-bit value to be rotated. +/// \param __C +/// The number of bits to rotate the value. +/// \returns The rotated value. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR +__rorq(unsigned long long __X, int __C) { + return __builtin_rotateright64(__X, (unsigned long long)__C); +} +#endif /* __x86_64__ */ + +#ifndef _MSC_VER +/* These are already provided as builtins for MSVC. */ +/* Select the correct function based on the size of long. */ +#ifdef __LP64__ +/// Rotates a 64-bit value to the left by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _lrotl(unsigned long long a, int b); +/// \endcode +/// +/// This intrinsic corresponds to the \c ROL instruction. +/// +/// \param a +/// The unsigned 64-bit value to be rotated. +/// \param b +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see __rolq +#define _lrotl(a,b) __rolq((a), (b)) + +/// Rotates a 64-bit value to the right by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// \code +/// unsigned long long _lrotr(unsigned long long a, int b); +/// \endcode +/// +/// This intrinsic corresponds to the \c ROR instruction. +/// +/// \param a +/// The unsigned 64-bit value to be rotated. +/// \param b +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see __rorq +#define _lrotr(a,b) __rorq((a), (b)) +#else // __LP64__ +/// Rotates a 32-bit value to the left by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// \code +/// unsigned int _lrotl(unsigned int a, int b); +/// \endcode +/// +/// This intrinsic corresponds to the \c ROL instruction. +/// +/// \param a +/// The unsigned 32-bit value to be rotated. +/// \param b +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see __rold +#define _lrotl(a,b) __rold((a), (b)) + +/// Rotates a 32-bit value to the right by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// \code +/// unsigned int _lrotr(unsigned int a, int b); +/// \endcode +/// +/// This intrinsic corresponds to the \c ROR instruction. +/// +/// \param a +/// The unsigned 32-bit value to be rotated. +/// \param b +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see __rord +#define _lrotr(a,b) __rord((a), (b)) +#endif // __LP64__ + +/// Rotates a 32-bit value to the left by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// \code +/// unsigned int _rotl(unsigned int a, int b); +/// \endcode +/// +/// This intrinsic corresponds to the \c ROL instruction. 
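/*
 * Illustrative sketch for the rotate intrinsics above (not part of the
 * upstream header).  The rotate count must stay below the operand width;
 * larger counts are documented as undefined.  Assumes an x86-64 target.
 */
#include <x86intrin.h>

static unsigned long long rotate_demo(unsigned long long x) {
  unsigned long long left  = __rolq(x, 13);     /* rotate left by 13 bits  */
  unsigned long long right = __rorq(left, 13);  /* rotate back             */
  return right;                                 /* equals x again          */
}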
+/// +/// \param a +/// The unsigned 32-bit value to be rotated. +/// \param b +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see __rold +#define _rotl(a,b) __rold((a), (b)) + +/// Rotates a 32-bit value to the right by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// \code +/// unsigned int _rotr(unsigned int a, int b); +/// \endcode +/// +/// This intrinsic corresponds to the \c ROR instruction. +/// +/// \param a +/// The unsigned 32-bit value to be rotated. +/// \param b +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see __rord +#define _rotr(a,b) __rord((a), (b)) +#endif // _MSC_VER + +/* These are not builtins so need to be provided in all modes. */ +/// Rotates a 16-bit value to the left by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// \code +/// unsigned short _rotwl(unsigned short a, int b); +/// \endcode +/// +/// This intrinsic corresponds to the \c ROL instruction. +/// +/// \param a +/// The unsigned 16-bit value to be rotated. +/// \param b +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see __rolw +#define _rotwl(a,b) __rolw((a), (b)) + +/// Rotates a 16-bit value to the right by the specified number of bits. +/// This operation is undefined if the number of bits exceeds the size of +/// the value. +/// +/// \headerfile +/// +/// \code +/// unsigned short _rotwr(unsigned short a, int b); +/// \endcode +/// +/// This intrinsic corresponds to the \c ROR instruction. +/// +/// \param a +/// The unsigned 16-bit value to be rotated. +/// \param b +/// The number of bits to rotate the value. +/// \returns The rotated value. +/// \see __rorw +#define _rotwr(a,b) __rorw((a), (b)) + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_CAST +#undef __DEFAULT_FN_ATTRS_CRC32 +#undef __DEFAULT_FN_ATTRS_CONSTEXPR + +#endif /* __IA32INTRIN_H */ diff --git a/third_party/intel/clang/immintrin.h b/third_party/intel/clang/immintrin.h new file mode 100644 index 000000000..a0b08a1e2 --- /dev/null +++ b/third_party/intel/clang/immintrin.h @@ -0,0 +1,747 @@ +/*===---- immintrin.h - Intel intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#define __IMMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +#include "x86gprintrin.h" + +#if !defined(__SCE__) || __has_feature(modules) || defined(__MMX__) +#include "mmintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE__) +#include "xmmintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE2__) +#include "emmintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE3__) +#include "pmmintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSSE3__) +#include "tmmintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__SSE4_2__) || defined(__SSE4_1__)) +#include "smmintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AES__) || defined(__PCLMUL__)) +#include "wmmintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLFLUSHOPT__) +#include "clflushoptintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLWB__) +#include "clwbintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX__) +#include "avxintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX2__) +#include "avx2intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__F16C__) +#include "f16cintrin.h" +#endif + +/* No feature check desired due to internal checks */ +#include "bmiintrin.h" + +#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI2__) +#include "bmi2intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__LZCNT__) +#include "lzcntintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__POPCNT__) +#include "popcntintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA__) +#include "fmaintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512F__) +#include "avx512fintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VL__) +#include "avx512vlintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BW__) +#include "avx512bwintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BITALG__) +#include "avx512bitalgintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512CD__) +#include "avx512cdintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__) +#include "avx512vpopcntdqintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__)) +#include "avx512vpopcntdqvlintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VNNI__) +#include "avx512vnniintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512VNNI__)) +#include "avx512vlvnniintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNI__) +#include "avxvnniintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512DQ__) +#include "avx512dqintrin.h" +#endif + +#if 
!defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512BITALG__)) +#include "avx512vlbitalgintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512BW__)) +#include "avx512vlbwintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512CD__)) +#include "avx512vlcdintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512DQ__)) +#include "avx512vldqintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512IFMA__) +#include "avx512ifmaintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512IFMA__) && defined(__AVX512VL__)) +#include "avx512ifmavlintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXIFMA__) +#include "avxifmaintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI__) +#include "avx512vbmiintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VBMI__) && defined(__AVX512VL__)) +#include "avx512vbmivlintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI2__) +#include "avx512vbmi2intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VBMI2__) && defined(__AVX512VL__)) +#include "avx512vlvbmi2intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512FP16__) +#include "avx512fp16intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512FP16__)) +#include "avx512vlfp16intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BF16__) +#include "avx512bf16intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512BF16__)) +#include "avx512vlbf16intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__PKU__) +#include "pkuintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__VPCLMULQDQ__) +#include "vpclmulqdqintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__VAES__) +#include "vaesintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__GFNI__) +#include "gfniintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT8__) +#include "avxvnniint8intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXNECONVERT__) +#include "avxneconvertintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA512__) +#include "sha512intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SM3__) +#include "sm3intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SM4__) +#include "sm4intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT16__) +#include "avxvnniint16intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPID__) +/// Reads the value of the IA32_TSC_AUX MSR (0xc0000103). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDPID instruction. +/// +/// \returns The 32-bit contents of the MSR. 
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("rdpid"))) +_rdpid_u32(void) { + return __builtin_ia32_rdpid(); +} +#endif // __RDPID__ + +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDRND__) +/// Returns a 16-bit hardware-generated random value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDRAND instruction. +/// +/// \param __p +/// A pointer to a 16-bit memory location to place the random value. +/// \returns 1 if the value was successfully generated, 0 otherwise. +static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) +_rdrand16_step(unsigned short *__p) +{ + return (int)__builtin_ia32_rdrand16_step(__p); +} + +/// Returns a 32-bit hardware-generated random value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDRAND instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location to place the random value. +/// \returns 1 if the value was successfully generated, 0 otherwise. +static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) +_rdrand32_step(unsigned int *__p) +{ + return (int)__builtin_ia32_rdrand32_step(__p); +} + +/// Returns a 64-bit hardware-generated random value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDRAND instruction. +/// +/// \param __p +/// A pointer to a 64-bit memory location to place the random value. +/// \returns 1 if the value was successfully generated, 0 otherwise. +static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd"))) +_rdrand64_step(unsigned long long *__p) +{ +#ifdef __x86_64__ + return (int)__builtin_ia32_rdrand64_step(__p); +#else + // We need to emulate the functionality of 64-bit rdrand with 2 32-bit + // rdrand instructions. + unsigned int __lo, __hi; + unsigned int __res_lo = __builtin_ia32_rdrand32_step(&__lo); + unsigned int __res_hi = __builtin_ia32_rdrand32_step(&__hi); + if (__res_lo && __res_hi) { + *__p = ((unsigned long long)__hi << 32) | (unsigned long long)__lo; + return 1; + } else { + *__p = 0; + return 0; + } +#endif +} +#endif /* __RDRND__ */ + +#if !defined(__SCE__) || __has_feature(modules) || defined(__FSGSBASE__) +#ifdef __x86_64__ +/// Reads the FS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDFSBASE instruction. +/// +/// \returns The lower 32 bits of the FS base register. +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_readfsbase_u32(void) +{ + return __builtin_ia32_rdfsbase32(); +} + +/// Reads the FS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDFSBASE instruction. +/// +/// \returns The contents of the FS base register. +static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_readfsbase_u64(void) +{ + return __builtin_ia32_rdfsbase64(); +} + +/// Reads the GS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDGSBASE instruction. +/// +/// \returns The lower 32 bits of the GS base register. +static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_readgsbase_u32(void) +{ + return __builtin_ia32_rdgsbase32(); +} + +/// Reads the GS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDGSBASE instruction. +/// +/// \returns The contents of the GS base register. 
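/*
 * Illustrative sketch of the usual retry loop around _rdrand64_step above
 * (not part of the upstream header).  The intrinsic returns 0 when the
 * hardware had no random data ready, so callers retry a bounded number of
 * times.  Assumes RDRAND is enabled at compile time (e.g. -mrdrnd).
 */
#include <immintrin.h>

static int rdrand64_retry(unsigned long long *out) {
  for (int i = 0; i < 10; i++)      /* Intel suggests a small retry bound */
    if (_rdrand64_step(out))
      return 1;                     /* success: *out holds a random value */
  return 0;                         /* hardware produced no entropy       */
}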
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_readgsbase_u64(void) +{ + return __builtin_ia32_rdgsbase64(); +} + +/// Modifies the FS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the WRFSBASE instruction. +/// +/// \param __V +/// Value to use for the lower 32 bits of the FS base register. +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_writefsbase_u32(unsigned int __V) +{ + __builtin_ia32_wrfsbase32(__V); +} + +/// Modifies the FS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the WRFSBASE instruction. +/// +/// \param __V +/// Value to use for the FS base register. +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_writefsbase_u64(unsigned long long __V) +{ + __builtin_ia32_wrfsbase64(__V); +} + +/// Modifies the GS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the WRGSBASE instruction. +/// +/// \param __V +/// Value to use for the lower 32 bits of the GS base register. +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_writegsbase_u32(unsigned int __V) +{ + __builtin_ia32_wrgsbase32(__V); +} + +/// Modifies the GS base register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the WRFSBASE instruction. +/// +/// \param __V +/// Value to use for GS base register. +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase"))) +_writegsbase_u64(unsigned long long __V) +{ + __builtin_ia32_wrgsbase64(__V); +} + +#endif +#endif /* __FSGSBASE__ */ + +#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVBE__) + +/* The structs used below are to force the load/store to be unaligned. This + * is accomplished with the __packed__ attribute. The __may_alias__ prevents + * tbaa metadata from being generated based on the struct and the type of the + * field inside of it. + */ + +/// Load a 16-bit value from memory and swap its bytes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVBE instruction. +/// +/// \param __P +/// A pointer to the 16-bit value to load. +/// \returns The byte-swapped value. +static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe"))) +_loadbe_i16(void const * __P) { + struct __loadu_i16 { + unsigned short __v; + } __attribute__((__packed__, __may_alias__)); + return (short)__builtin_bswap16(((const struct __loadu_i16*)__P)->__v); +} + +/// Swap the bytes of a 16-bit value and store it to memory. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVBE instruction. +/// +/// \param __P +/// A pointer to the memory for storing the swapped value. +/// \param __D +/// The 16-bit value to be byte-swapped. +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe"))) +_storebe_i16(void * __P, short __D) { + struct __storeu_i16 { + unsigned short __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_i16*)__P)->__v = __builtin_bswap16((unsigned short)__D); +} + +/// Load a 32-bit value from memory and swap its bytes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVBE instruction. +/// +/// \param __P +/// A pointer to the 32-bit value to load. +/// \returns The byte-swapped value. 
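/*
 * Illustrative sketch for the MOVBE load/store helpers above (not part of
 * the upstream header).  They are handy when parsing big-endian wire
 * formats on a little-endian CPU, and tolerate unaligned addresses.
 * Assumes MOVBE is enabled at compile time (e.g. -mmovbe).
 */
#include <immintrin.h>

/* Read a big-endian 16-bit length field at an arbitrary, possibly
   unaligned, offset inside a packet buffer. */
static unsigned parse_be16_length(const unsigned char *pkt, unsigned off) {
  return (unsigned short)_loadbe_i16(pkt + off);
}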
+static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe"))) +_loadbe_i32(void const * __P) { + struct __loadu_i32 { + unsigned int __v; + } __attribute__((__packed__, __may_alias__)); + return (int)__builtin_bswap32(((const struct __loadu_i32*)__P)->__v); +} + +/// Swap the bytes of a 32-bit value and store it to memory. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVBE instruction. +/// +/// \param __P +/// A pointer to the memory for storing the swapped value. +/// \param __D +/// The 32-bit value to be byte-swapped. +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe"))) +_storebe_i32(void * __P, int __D) { + struct __storeu_i32 { + unsigned int __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_i32*)__P)->__v = __builtin_bswap32((unsigned int)__D); +} + +#ifdef __x86_64__ +/// Load a 64-bit value from memory and swap its bytes. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVBE instruction. +/// +/// \param __P +/// A pointer to the 64-bit value to load. +/// \returns The byte-swapped value. +static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe"))) +_loadbe_i64(void const * __P) { + struct __loadu_i64 { + unsigned long long __v; + } __attribute__((__packed__, __may_alias__)); + return (long long)__builtin_bswap64(((const struct __loadu_i64*)__P)->__v); +} + +/// Swap the bytes of a 64-bit value and store it to memory. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVBE instruction. +/// +/// \param __P +/// A pointer to the memory for storing the swapped value. +/// \param __D +/// The 64-bit value to be byte-swapped. +static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe"))) +_storebe_i64(void * __P, long long __D) { + struct __storeu_i64 { + unsigned long long __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_i64*)__P)->__v = __builtin_bswap64((unsigned long long)__D); +} +#endif +#endif /* __MOVBE */ + +#if !defined(__SCE__) || __has_feature(modules) || defined(__RTM__) +#include "rtmintrin.h" +#include "xtestintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA__) +#include "shaintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__FXSR__) +#include "fxsrintrin.h" +#endif + +/* No feature check desired due to internal MSC_VER checks */ +#include "xsaveintrin.h" + +#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEOPT__) +#include "xsaveoptintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEC__) +#include "xsavecintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVES__) +#include "xsavesintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SHSTK__) +#include "cetintrin.h" +#endif + +/* Intrinsics inside adcintrin.h are available at all times. 
*/ +#include "adcintrin.h" + +#if !defined(__SCE__) || __has_feature(modules) || defined(__ADX__) +#include "adxintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDSEED__) +#include "rdseedintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__WBNOINVD__) +#include "wbnoinvdintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLDEMOTE__) +#include "cldemoteintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__WAITPKG__) +#include "waitpkgintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVDIRI__) || \ + defined(__MOVDIR64B__) +#include "movdirintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__PCONFIG__) +#include "pconfigintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SGX__) +#include "sgxintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__PTWRITE__) +#include "ptwriteintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__INVPCID__) +#include "invpcidintrin.h" +#endif +#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_FP16__) +#include "amxfp16intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__) || \ + defined(__WIDEKL__) +#include "keylockerintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TILE__) || \ + defined(__AMX_INT8__) || defined(__AMX_BF16__) +#include "amxintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_COMPLEX__) +#include "amxcomplexintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + defined(__AVX512VP2INTERSECT__) +#include "avx512vp2intersectintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || \ + (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__)) +#include "avx512vlvp2intersectintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__ENQCMD__) +#include "enqcmdintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SERIALIZE__) +#include "serializeintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__TSXLDTRK__) +#include "tsxldtrkintrin.h" +#endif + +#if defined(_MSC_VER) && __has_extension(gnu_asm) +/* Define the default attributes for these intrinsics */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__)) +#ifdef __cplusplus +extern "C" { +#endif +/*----------------------------------------------------------------------------*\ +|* Interlocked Exchange HLE +\*----------------------------------------------------------------------------*/ +#if defined(__i386__) || defined(__x86_64__) +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedExchange_HLEAcquire(long volatile *_Target, long _Value) { + __asm__ __volatile__(".byte 0xf2 ; lock ; xchg {%0, %1|%1, %0}" + : "+r" (_Value), "+m" (*_Target) :: "memory"); + return _Value; +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedExchange_HLERelease(long volatile *_Target, long _Value) { + __asm__ __volatile__(".byte 0xf3 ; lock ; xchg {%0, %1|%1, %0}" + : "+r" (_Value), "+m" (*_Target) :: "memory"); + return _Value; +} +#endif +#if defined(__x86_64__) +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedExchange64_HLEAcquire(__int64 volatile *_Target, __int64 _Value) { + __asm__ __volatile__(".byte 0xf2 ; lock ; xchg {%0, %1|%1, %0}" + : "+r" (_Value), "+m" (*_Target) :: 
"memory"); + return _Value; +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) { + __asm__ __volatile__(".byte 0xf3 ; lock ; xchg {%0, %1|%1, %0}" + : "+r" (_Value), "+m" (*_Target) :: "memory"); + return _Value; +} +#endif +/*----------------------------------------------------------------------------*\ +|* Interlocked Compare Exchange HLE +\*----------------------------------------------------------------------------*/ +#if defined(__i386__) || defined(__x86_64__) +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedCompareExchange_HLEAcquire(long volatile *_Destination, + long _Exchange, long _Comparand) { + __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg {%2, %1|%1, %2}" + : "+a" (_Comparand), "+m" (*_Destination) + : "r" (_Exchange) : "memory"); + return _Comparand; +} +static __inline__ long __DEFAULT_FN_ATTRS +_InterlockedCompareExchange_HLERelease(long volatile *_Destination, + long _Exchange, long _Comparand) { + __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg {%2, %1|%1, %2}" + : "+a" (_Comparand), "+m" (*_Destination) + : "r" (_Exchange) : "memory"); + return _Comparand; +} +#endif +#if defined(__x86_64__) +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand) { + __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg {%2, %1|%1, %2}" + : "+a" (_Comparand), "+m" (*_Destination) + : "r" (_Exchange) : "memory"); + return _Comparand; +} +static __inline__ __int64 __DEFAULT_FN_ATTRS +_InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand) { + __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg {%2, %1|%1, %2}" + : "+a" (_Comparand), "+m" (*_Destination) + : "r" (_Exchange) : "memory"); + return _Comparand; +} +#endif +#ifdef __cplusplus +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif /* defined(_MSC_VER) && __has_extension(gnu_asm) */ + +#endif /* __IMMINTRIN_H */ diff --git a/third_party/intel/clang/invpcidintrin.h b/third_party/intel/clang/invpcidintrin.h new file mode 100644 index 000000000..48dae0a86 --- /dev/null +++ b/third_party/intel/clang/invpcidintrin.h @@ -0,0 +1,23 @@ +/*===------------- invpcidintrin.h - INVPCID intrinsic ---------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." 
+#endif + +#ifndef __INVPCIDINTRIN_H +#define __INVPCIDINTRIN_H + +static __inline__ void + __attribute__((__always_inline__, __nodebug__, __target__("invpcid"))) +_invpcid(unsigned int __type, void *__descriptor) { + __builtin_ia32_invpcid(__type, __descriptor); +} + +#endif /* __INVPCIDINTRIN_H */ diff --git a/third_party/intel/clang/keylockerintrin.h b/third_party/intel/clang/keylockerintrin.h new file mode 100644 index 000000000..f76e91b4d --- /dev/null +++ b/third_party/intel/clang/keylockerintrin.h @@ -0,0 +1,527 @@ +/*===----------------- keylockerintrin.h - KL Intrinsics -------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef _KEYLOCKERINTRIN_H +#define _KEYLOCKERINTRIN_H + +#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__) + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("kl"),\ + __min_vector_width__(128))) + +/// Load internal wrapping key from __intkey, __enkey_lo and __enkey_hi. __ctl +/// will assigned to EAX, whch specifies the KeySource and whether backing up +/// the key is permitted. The 256-bit encryption key is loaded from the two +/// explicit operands (__enkey_lo and __enkey_hi). The 128-bit integrity key is +/// loaded from the implicit operand XMM0 which assigned by __intkey. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LOADIWKEY instructions. +/// +/// \code{.operation} +/// IF CPL > 0 // LOADKWKEY only allowed at ring 0 (supervisor mode) +/// GP (0) +/// FI +/// IF “LOADIWKEY exiting” VM execution control set +/// VMexit +/// FI +/// IF __ctl[4:1] > 1 // Reserved KeySource encoding used +/// GP (0) +/// FI +/// IF __ctl[31:5] != 0 // Reserved bit in __ctl is set +/// GP (0) +/// FI +/// IF __ctl[0] AND (CPUID.19H.ECX[0] == 0) // NoBackup is not supported on this part +/// GP (0) +/// FI +/// IF (__ctl[4:1] == 1) AND (CPUID.19H.ECX[1] == 0) // KeySource of 1 is not supported on this part +/// GP (0) +/// FI +/// IF (__ctl[4:1] == 0) // KeySource of 0. 
+/// IWKey.Encryption Key[127:0] := __enkey_hi[127:0]: +/// IWKey.Encryption Key[255:128] := __enkey_lo[127:0] +/// IWKey.IntegrityKey[127:0] := __intkey[127:0] +/// IWKey.NoBackup := __ctl[0] +/// IWKey.KeySource := __ctl[4:1] +/// ZF := 0 +/// ELSE // KeySource of 1. See RDSEED definition for details of randomness +/// IF HW_NRND_GEN.ready == 1 // Full-entropy random data from RDSEED was received +/// IWKey.Encryption Key[127:0] := __enkey_hi[127:0] XOR HW_NRND_GEN.data[127:0] +/// IWKey.Encryption Key[255:128] := __enkey_lo[127:0] XOR HW_NRND_GEN.data[255:128] +/// IWKey.Encryption Key[255:0] := __enkey_hi[127:0]:__enkey_lo[127:0] XOR HW_NRND_GEN.data[255:0] +/// IWKey.IntegrityKey[127:0] := __intkey[127:0] XOR HW_NRND_GEN.data[383:256] +/// IWKey.NoBackup := __ctl[0] +/// IWKey.KeySource := __ctl[4:1] +/// ZF := 0 +/// ELSE // Random data was not returned from RDSEED. IWKey was not loaded +/// ZF := 1 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS +_mm_loadiwkey (unsigned int __ctl, __m128i __intkey, + __m128i __enkey_lo, __m128i __enkey_hi) { + __builtin_ia32_loadiwkey (__intkey, __enkey_lo, __enkey_hi, __ctl); +} + +/// Wrap a 128-bit AES key from __key into a key handle and output in +/// ((__m128i*)__h) to ((__m128i*)__h) + 2 and a 32-bit value as return. +/// The explicit source operand __htype specifies handle restrictions. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the ENCODEKEY128 instructions. +/// +/// \code{.operation} +/// InputKey[127:0] := __key[127:0] +/// KeyMetadata[2:0] := __htype[2:0] +/// KeyMetadata[23:3] := 0 // Reserved for future usage +/// KeyMetadata[27:24] := 0 // KeyType is AES-128 (value of 0) +/// KeyMetadata[127:28] := 0 // Reserved for future usage +/// Handle[383:0] := WrapKey128(InputKey[127:0], KeyMetadata[127:0], +/// IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0]) +/// dst[0] := IWKey.NoBackup +/// dst[4:1] := IWKey.KeySource[3:0] +/// dst[31:5] := 0 +/// MEM[__h+127:__h] := Handle[127:0] // AAD +/// MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag +/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText +/// OF := 0 +/// SF := 0 +/// ZF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) { + return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h); +} + +/// Wrap a 256-bit AES key from __key_hi:__key_lo into a key handle, then +/// output handle in ((__m128i*)__h) to ((__m128i*)__h) + 3 and +/// a 32-bit value as return. +/// The explicit source operand __htype specifies handle restrictions. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the ENCODEKEY256 instructions. 
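/*
 * Illustrative sketch for _mm_encodekey128_u32 above (not part of the
 * upstream header).  The raw AES-128 key is wrapped into a 384-bit handle;
 * the handle, not the key, is what the later AESENC*KL/AESDEC*KL calls
 * consume.  Assumes Key Locker is enabled (e.g. -mkl) and that an IWKey has
 * already been loaded by the OS or via _mm_loadiwkey.
 */
#include <immintrin.h>

static unsigned wrap_aes128_key(__m128i raw_key, __m128i handle_out[3]) {
  /* The handle-restriction field is left as 0 here, i.e. no usage
     restrictions are requested for the resulting handle. */
  return _mm_encodekey128_u32(0, raw_key, handle_out);
}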
+/// +/// \code{.operation} +/// InputKey[127:0] := __key_lo[127:0] +/// InputKey[255:128] := __key_hi[255:128] +/// KeyMetadata[2:0] := __htype[2:0] +/// KeyMetadata[23:3] := 0 // Reserved for future usage +/// KeyMetadata[27:24] := 1 // KeyType is AES-256 (value of 1) +/// KeyMetadata[127:28] := 0 // Reserved for future usage +/// Handle[511:0] := WrapKey256(InputKey[255:0], KeyMetadata[127:0], +/// IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0]) +/// dst[0] := IWKey.NoBackup +/// dst[4:1] := IWKey.KeySource[3:0] +/// dst[31:5] := 0 +/// MEM[__h+127:__h] := Handle[127:0] // AAD +/// MEM[__h+255:__h+128] := Handle[255:128] // Tag +/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText[127:0] +/// MEM[__h+511:__h+384] := Handle[511:384] // CipherText[255:128] +/// OF := 0 +/// SF := 0 +/// ZF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi, + void *__h) { + return __builtin_ia32_encodekey256_u32(__htype, (__v2di)__key_lo, + (__v2di)__key_hi, __h); +} + +/// The AESENC128KL performs 10 rounds of AES to encrypt the __idata using +/// the 128-bit key in the handle from the __h. It stores the result in the +/// __odata. And return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESENC128KL instructions. +/// +/// \code{.operation} +/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic. +/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[383:256] || +/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 ) +/// IF (IllegalHandle) +/// ZF := 1 +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey) +/// IF (Authentic == 0) +/// ZF := 1 +/// ELSE +/// MEM[__odata+127:__odata] := AES128Encrypt (__idata[127:0], UnwrappedKey) +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) { + return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h); +} + +/// The AESENC256KL performs 14 rounds of AES to encrypt the __idata using +/// the 256-bit key in the handle from the __h. It stores the result in the +/// __odata. And return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESENC256KL instructions. +/// +/// \code{.operation} +/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic. 
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[255:128] || +/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 ) +/// IF (IllegalHandle) +/// ZF := 1 +/// MEM[__odata+127:__odata] := 0 +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey) +/// IF (Authentic == 0) +/// ZF := 1 +/// MEM[__odata+127:__odata] := 0 +/// ELSE +/// MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], UnwrappedKey) +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) { + return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h); +} + +/// The AESDEC128KL performs 10 rounds of AES to decrypt the __idata using +/// the 128-bit key in the handle from the __h. It stores the result in the +/// __odata. And return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESDEC128KL instructions. +/// +/// \code{.operation} +/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic. +/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[383:256] || +/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128) +/// IF (IllegalHandle) +/// ZF := 1 +/// MEM[__odata+127:__odata] := 0 +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey) +/// IF (Authentic == 0) +/// ZF := 1 +/// MEM[__odata+127:__odata] := 0 +/// ELSE +/// MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], UnwrappedKey) +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) { + return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h); +} + +/// The AESDEC256KL performs 10 rounds of AES to decrypt the __idata using +/// the 256-bit key in the handle from the __h. It stores the result in the +/// __odata. And return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESDEC256KL instructions. 
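A minimal usage sketch of the single-block Key Locker intrinsics above, assuming an AES-KL-capable CPU whose operating system has already installed an IWKey (LOADIWKEY executes only at CPL 0); the helper name kl_roundtrip and its error convention are illustrative, not part of this header.

static int kl_roundtrip(__m128i key, __m128i block, __m128i *out) {
  __m128i handle[3];                            /* 384-bit handle written by ENCODEKEY128 */
  __m128i tmp;
  (void)_mm_encodekey128_u32(0, key, handle);   /* htype 0: no usage restrictions */
  if (_mm_aesenc128kl_u8(&tmp, block, handle))  /* nonzero return: handle rejected */
    return -1;
  if (_mm_aesdec128kl_u8(out, tmp, handle))     /* decrypt back with the same handle */
    return -1;
  return 0;                                     /* *out now equals the original block */
}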
+/// +/// \code{.operation} +/// Handle[511:0] := MEM[__h+511:__h] +/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[383:256] || +/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256) +/// IF (IllegalHandle) +/// ZF := 1 +/// MEM[__odata+127:__odata] := 0 +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey) +/// IF (Authentic == 0) +/// ZF := 1 +/// MEM[__odata+127:__odata] := 0 +/// ELSE +/// MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], UnwrappedKey) +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) { + return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* !defined(__SCE__ || __has_feature(modules) || defined(__KL__) */ + +#if !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("kl,widekl"),\ + __min_vector_width__(128))) + +/// Encrypt __idata[0] to __idata[7] using 128-bit AES key indicated by handle +/// at __h and store each resultant block back from __odata to __odata+7. And +/// return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESENCWIDE128KL instructions. +/// +/// \code{.operation} +/// Handle := MEM[__h+383:__h] +/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[255:128] || +/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 ) +/// IF (IllegalHandle) +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey) +/// IF Authentic == 0 +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// FOR i := 0 to 7 +/// __odata[i] := AES128Encrypt (__idata[i], UnwrappedKey) +/// ENDFOR +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) { + return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata, + (const __v2di *)__idata, __h); +} + +/// Encrypt __idata[0] to __idata[7] using 256-bit AES key indicated by handle +/// at __h and store each resultant block back from __odata to __odata+7. And +/// return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESENCWIDE256KL instructions. 
+/// +/// \code{.operation} +/// Handle[511:0] := MEM[__h+511:__h] +/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[255:128] || +/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES512 ) +/// IF (IllegalHandle) +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey) +/// IF Authentic == 0 +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// FOR i := 0 to 7 +/// __odata[i] := AES256Encrypt (__idata[i], UnwrappedKey) +/// ENDFOR +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) { + return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata, + (const __v2di *)__idata, __h); +} + +/// Decrypt __idata[0] to __idata[7] using 128-bit AES key indicated by handle +/// at __h and store each resultant block back from __odata to __odata+7. And +/// return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESDECWIDE128KL instructions. +/// +/// \code{.operation} +/// Handle[383:0] := MEM[__h+383:__h] +/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[255:128] || +/// HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES128 ) +/// IF (IllegalHandle) +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey) +/// IF Authentic == 0 +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// FOR i := 0 to 7 +/// __odata[i] := AES128Decrypt (__idata[i], UnwrappedKey) +/// ENDFOR +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) { + return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata, + (const __v2di *)__idata, __h); +} + +/// Decrypt __idata[0] to __idata[7] using 256-bit AES key indicated by handle +/// at __h and store each resultant block back from __odata to __odata+7. And +/// return the affected ZF flag status. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the AESDECWIDE256KL instructions. 
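A sketch of the eight-block wide variants above under the same assumptions; only the intrinsics themselves come from this header, the wrapper names are illustrative.

static int kl_encrypt8(__m128i out[8], const __m128i in[8], const void *handle) {
  return _mm_aesencwide128kl_u8(out, in, handle);   /* all 8 blocks in one call; returns ZF */
}
static int kl_decrypt8(__m128i out[8], const __m128i in[8], const void *handle) {
  return _mm_aesdecwide128kl_u8(out, in, handle);   /* 0 on success, 1 if the handle is bad */
}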
+/// +/// \code{.operation} +/// Handle[511:0] := MEM[__h+511:__h] +/// IllegalHandle = ( HandleReservedBitSet (Handle[511:0]) || +/// (Handle[127:0] AND (CPL > 0)) || +/// Handle[255:128] || +/// HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES512 ) +/// If (IllegalHandle) +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey) +/// IF Authentic == 0 +/// ZF := 1 +/// FOR i := 0 to 7 +/// __odata[i] := 0 +/// ENDFOR +/// ELSE +/// FOR i := 0 to 7 +/// __odata[i] := AES256Decrypt (__idata[i], UnwrappedKey) +/// ENDFOR +/// ZF := 0 +/// FI +/// FI +/// dst := ZF +/// OF := 0 +/// SF := 0 +/// AF := 0 +/// PF := 0 +/// CF := 0 +/// \endcode +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) { + return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata, + (const __v2di *)__idata, __h); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) \ + */ + +#endif /* _KEYLOCKERINTRIN_H */ diff --git a/third_party/intel/clang/lwpintrin.h b/third_party/intel/clang/lwpintrin.h new file mode 100644 index 000000000..d8ab0db03 --- /dev/null +++ b/third_party/intel/clang/lwpintrin.h @@ -0,0 +1,136 @@ +/*===---- lwpintrin.h - LWP intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86INTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __LWPINTRIN_H +#define __LWPINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lwp"))) + +/// Parses the LWPCB at the specified address and enables +/// profiling if valid. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LLWPCB instruction. +/// +/// \param __addr +/// Address to the new Lightweight Profiling Control Block (LWPCB). If the +/// LWPCB is valid, writes the address into the LWP_CBADDR MSR and enables +/// Lightweight Profiling. +static __inline__ void __DEFAULT_FN_ATTRS +__llwpcb (void *__addr) +{ + __builtin_ia32_llwpcb(__addr); +} + +/// Flushes the LWP state to memory and returns the address of the LWPCB. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the SLWPCB instruction. +/// +/// \return +/// Address to the current Lightweight Profiling Control Block (LWPCB). +/// If LWP is not currently enabled, returns NULL. +static __inline__ void* __DEFAULT_FN_ATTRS +__slwpcb (void) +{ + return __builtin_ia32_slwpcb(); +} + +/// Inserts programmed event record into the LWP event ring buffer +/// and advances the ring buffer pointer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LWPINS instruction. +/// +/// \param DATA2 +/// A 32-bit value is zero-extended and inserted into the 64-bit Data2 field. +/// \param DATA1 +/// A 32-bit value is inserted into the 32-bit Data1 field. +/// \param FLAGS +/// A 32-bit immediate value is inserted into the 32-bit Flags field. 
+/// \returns If the ring buffer is full and LWP is running in Synchronized Mode, +/// the event record overwrites the last record in the buffer, the MissedEvents +/// counter in the LWPCB is incremented, the head pointer is not advanced, and +/// 1 is returned. Otherwise 0 is returned. +#define __lwpins32(DATA2, DATA1, FLAGS) \ + (__builtin_ia32_lwpins32((unsigned int) (DATA2), (unsigned int) (DATA1), \ + (unsigned int) (FLAGS))) + +/// Decrements the LWP programmed value sample event counter. If the result is +/// negative, inserts an event record into the LWP event ring buffer in memory +/// and advances the ring buffer pointer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LWPVAL instruction. +/// +/// \param DATA2 +/// A 32-bit value is zero-extended and inserted into the 64-bit Data2 field. +/// \param DATA1 +/// A 32-bit value is inserted into the 32-bit Data1 field. +/// \param FLAGS +/// A 32-bit immediate value is inserted into the 32-bit Flags field. +#define __lwpval32(DATA2, DATA1, FLAGS) \ + (__builtin_ia32_lwpval32((unsigned int) (DATA2), (unsigned int) (DATA1), \ + (unsigned int) (FLAGS))) + +#ifdef __x86_64__ + +/// Inserts programmed event record into the LWP event ring buffer +/// and advances the ring buffer pointer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LWPINS instruction. +/// +/// \param DATA2 +/// A 64-bit value is inserted into the 64-bit Data2 field. +/// \param DATA1 +/// A 32-bit value is inserted into the 32-bit Data1 field. +/// \param FLAGS +/// A 32-bit immediate value is inserted into the 32-bit Flags field. +/// \returns If the ring buffer is full and LWP is running in Synchronized Mode, +/// the event record overwrites the last record in the buffer, the MissedEvents +/// counter in the LWPCB is incremented, the head pointer is not advanced, and +/// 1 is returned. Otherwise 0 is returned. +#define __lwpins64(DATA2, DATA1, FLAGS) \ + (__builtin_ia32_lwpins64((unsigned long long) (DATA2), (unsigned int) (DATA1), \ + (unsigned int) (FLAGS))) + +/// Decrements the LWP programmed value sample event counter. If the result is +/// negative, inserts an event record into the LWP event ring buffer in memory +/// and advances the ring buffer pointer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the LWPVAL instruction. +/// +/// \param DATA2 +/// A 64-bit value is and inserted into the 64-bit Data2 field. +/// \param DATA1 +/// A 32-bit value is inserted into the 32-bit Data1 field. +/// \param FLAGS +/// A 32-bit immediate value is inserted into the 32-bit Flags field. +#define __lwpval64(DATA2, DATA1, FLAGS) \ + (__builtin_ia32_lwpval64((unsigned long long) (DATA2), (unsigned int) (DATA1), \ + (unsigned int) (FLAGS))) + +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif /* __LWPINTRIN_H */ diff --git a/third_party/intel/clang/lzcntintrin.h b/third_party/intel/clang/lzcntintrin.h new file mode 100644 index 000000000..f4ddce9d0 --- /dev/null +++ b/third_party/intel/clang/lzcntintrin.h @@ -0,0 +1,104 @@ +/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." 
+#endif + +#ifndef __LZCNTINTRIN_H +#define __LZCNTINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt"))) + +#ifndef _MSC_VER +/// Counts the number of leading zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 16-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 16-bit integer containing the number of leading zero +/// bits in the operand. +#define __lzcnt16(X) __builtin_ia32_lzcnt_u16((unsigned short)(X)) +#endif // _MSC_VER + +/// Counts the number of leading zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 32-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 32-bit integer containing the number of leading zero +/// bits in the operand. +/// \see _lzcnt_u32 +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__lzcnt32(unsigned int __X) +{ + return __builtin_ia32_lzcnt_u32(__X); +} + +/// Counts the number of leading zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 32-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 32-bit integer containing the number of leading zero +/// bits in the operand. +/// \see __lzcnt32 +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_lzcnt_u32(unsigned int __X) +{ + return __builtin_ia32_lzcnt_u32(__X); +} + +#ifdef __x86_64__ +#ifndef _MSC_VER +/// Counts the number of leading zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 64-bit integer containing the number of leading zero +/// bits in the operand. +/// \see _lzcnt_u64 +#define __lzcnt64(X) __builtin_ia32_lzcnt_u64((unsigned long long)(X)) +#endif // _MSC_VER + +/// Counts the number of leading zero bits in the operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c LZCNT instruction. +/// +/// \param __X +/// An unsigned 64-bit integer whose leading zeros are to be counted. +/// \returns An unsigned 64-bit integer containing the number of leading zero +/// bits in the operand. +/// \see __lzcnt64 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +_lzcnt_u64(unsigned long long __X) +{ + return __builtin_ia32_lzcnt_u64(__X); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif /* __LZCNTINTRIN_H */ diff --git a/third_party/intel/clang/mm_malloc.h b/third_party/intel/clang/mm_malloc.h new file mode 100644 index 000000000..d32fe5941 --- /dev/null +++ b/third_party/intel/clang/mm_malloc.h @@ -0,0 +1,67 @@ +/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __MM_MALLOC_H +#define __MM_MALLOC_H + +#include + +#ifdef _WIN32 +#include +#else +#ifndef __cplusplus +extern int posix_memalign(void **__memptr, size_t __alignment, size_t __size); +#else +// Some systems (e.g. 
those with GNU libc) declare posix_memalign with an +// exception specifier. Via an "egregious workaround" in +// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid +// redeclaration of glibc's declaration. +extern "C" int posix_memalign(void **__memptr, size_t __alignment, size_t __size); +#endif +#endif + +#if !(defined(_WIN32) && defined(_mm_malloc)) +static __inline__ void *__attribute__((__always_inline__, __nodebug__, + __malloc__, __alloc_size__(1), + __alloc_align__(2))) +_mm_malloc(size_t __size, size_t __align) { + if (__align == 1) { + return malloc(__size); + } + + if (!(__align & (__align - 1)) && __align < sizeof(void *)) + __align = sizeof(void *); + + void *__mallocedMemory; +#if defined(__MINGW32__) + __mallocedMemory = __mingw_aligned_malloc(__size, __align); +#elif defined(_WIN32) + __mallocedMemory = _aligned_malloc(__size, __align); +#else + if (posix_memalign(&__mallocedMemory, __align, __size)) + return 0; +#endif + + return __mallocedMemory; +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_mm_free(void *__p) +{ +#if defined(__MINGW32__) + __mingw_aligned_free(__p); +#elif defined(_WIN32) + _aligned_free(__p); +#else + free(__p); +#endif +} +#endif + +#endif /* __MM_MALLOC_H */ diff --git a/third_party/intel/clang/mmintrin.h b/third_party/intel/clang/mmintrin.h new file mode 100644 index 000000000..4e154e2d8 --- /dev/null +++ b/third_party/intel/clang/mmintrin.h @@ -0,0 +1,1556 @@ +/*===---- mmintrin.h - MMX intrinsics --------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __MMINTRIN_H +#define __MMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +typedef long long __m64 __attribute__((__vector_size__(8), __aligned__(8))); + +typedef long long __v1di __attribute__((__vector_size__(8))); +typedef int __v2si __attribute__((__vector_size__(8))); +typedef short __v4hi __attribute__((__vector_size__(8))); +typedef char __v8qi __attribute__((__vector_size__(8))); + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("mmx,no-evex512"), \ + __min_vector_width__(64))) + +/// Clears the MMX state by setting the state of the x87 stack registers +/// to empty. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the EMMS instruction. +/// +static __inline__ void __attribute__((__always_inline__, __nodebug__, + __target__("mmx,no-evex512"))) +_mm_empty(void) { + __builtin_ia32_emms(); +} + +/// Constructs a 64-bit integer vector, setting the lower 32 bits to the +/// value of the 32-bit integer parameter and setting the upper 32 bits to 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVD instruction. +/// +/// \param __i +/// A 32-bit integer value. +/// \returns A 64-bit integer vector. The lower 32 bits contain the value of the +/// parameter. The upper 32 bits are set to 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cvtsi32_si64(int __i) +{ + return (__m64)__builtin_ia32_vec_init_v2si(__i, 0); +} + +/// Returns the lower 32 bits of a 64-bit integer vector as a 32-bit +/// signed integer. 
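A small usage sketch of the _mm_malloc/_mm_free pair defined above, assuming size_t is available from the standard headers; the helper name is illustrative.

static void *alloc_cacheline_aligned(size_t n) {
  void *p = _mm_malloc(n, 64);   /* 64-byte alignment, e.g. one cache line */
  /* the pointer must later be released with _mm_free(p), never free(p) */
  return p;
}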
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVD instruction. +/// +/// \param __m +/// A 64-bit integer vector. +/// \returns A 32-bit signed integer value containing the lower 32 bits of the +/// parameter. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvtsi64_si32(__m64 __m) +{ + return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0); +} + +/// Casts a 64-bit signed integer value into a 64-bit integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVQ instruction. +/// +/// \param __i +/// A 64-bit signed integer. +/// \returns A 64-bit integer vector containing the same bitwise pattern as the +/// parameter. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cvtsi64_m64(long long __i) +{ + return (__m64)__i; +} + +/// Casts a 64-bit integer vector into a 64-bit signed integer value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVQ instruction. +/// +/// \param __m +/// A 64-bit integer vector. +/// \returns A 64-bit signed integer containing the same bitwise pattern as the +/// parameter. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvtm64_si64(__m64 __m) +{ + return (long long)__m; +} + +/// Converts, with saturation, 16-bit signed integers from both 64-bit integer +/// vector parameters of [4 x i16] into 8-bit signed integer values, and +/// constructs a 64-bit integer vector of [8 x i8] as the result. +/// +/// Positive values greater than 0x7F are saturated to 0x7F. Negative values +/// less than 0x80 are saturated to 0x80. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PACKSSWB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the converted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_packs_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2); +} + +/// Converts, with saturation, 32-bit signed integers from both 64-bit integer +/// vector parameters of [2 x i32] into 16-bit signed integer values, and +/// constructs a 64-bit integer vector of [4 x i16] as the result. +/// +/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative +/// values less than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PACKSSDW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are +/// written to the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are +/// written to the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [4 x i16] containing the converted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_packs_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2); +} + +/// Converts, with saturation, 16-bit signed integers from both 64-bit integer +/// vector parameters of [4 x i16] into 8-bit unsigned integer values, and +/// constructs a 64-bit integer vector of [8 x i8] as the result. +/// +/// Values greater than 0xFF are saturated to 0xFF. Values less than 0 are +/// saturated to 0. 
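An illustrative round trip through the saturating pack above; the literal values are made up to show the clamping behavior.

static long long pack_demo(void) {
  __m64 a = _mm_cvtsi64_m64(0x7FFF0080FF80FFFFLL); /* i16 lanes (low to high): -1, -128, 128, 32767 */
  __m64 b = _mm_cvtsi64_m64(0);
  __m64 r = _mm_packs_pi16(a, b);                  /* i8 lanes: -1, -128, 127, 127, 0, 0, 0, 0 */
  long long out = _mm_cvtm64_si64(r);              /* == 0x000000007F7F80FF */
  _mm_empty();                                     /* leave MMX state before x87 code runs */
  return out;
}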
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the PACKUSWB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the converted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_packs_pu16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2); +} + +/// Unpacks the upper 32 bits from two 64-bit integer vectors of [8 x i8] +/// and interleaves them into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKHBW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. \n +/// Bits [39:32] are written to bits [7:0] of the result. \n +/// Bits [47:40] are written to bits [23:16] of the result. \n +/// Bits [55:48] are written to bits [39:32] of the result. \n +/// Bits [63:56] are written to bits [55:48] of the result. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// Bits [39:32] are written to bits [15:8] of the result. \n +/// Bits [47:40] are written to bits [31:24] of the result. \n +/// Bits [55:48] are written to bits [47:40] of the result. \n +/// Bits [63:56] are written to bits [63:56] of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpackhi_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2); +} + +/// Unpacks the upper 32 bits from two 64-bit integer vectors of +/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKHWD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [47:32] are written to bits [15:0] of the result. \n +/// Bits [63:48] are written to bits [47:32] of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [47:32] are written to bits [31:16] of the result. \n +/// Bits [63:48] are written to bits [63:48] of the result. +/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpackhi_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2); +} + +/// Unpacks the upper 32 bits from two 64-bit integer vectors of +/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKHDQ instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to +/// the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to +/// the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpackhi_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2); +} + +/// Unpacks the lower 32 bits from two 64-bit integer vectors of [8 x i8] +/// and interleaves them into a 64-bit integer vector of [8 x i8]. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKLBW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// Bits [7:0] are written to bits [7:0] of the result. \n +/// Bits [15:8] are written to bits [23:16] of the result. \n +/// Bits [23:16] are written to bits [39:32] of the result. \n +/// Bits [31:24] are written to bits [55:48] of the result. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// Bits [7:0] are written to bits [15:8] of the result. \n +/// Bits [15:8] are written to bits [31:24] of the result. \n +/// Bits [23:16] are written to bits [47:40] of the result. \n +/// Bits [31:24] are written to bits [63:56] of the result. +/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpacklo_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2); +} + +/// Unpacks the lower 32 bits from two 64-bit integer vectors of +/// [4 x i16] and interleaves them into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKLWD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [15:0] are written to bits [15:0] of the result. \n +/// Bits [31:16] are written to bits [47:32] of the result. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// Bits [15:0] are written to bits [31:16] of the result. \n +/// Bits [31:16] are written to bits [63:48] of the result. +/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpacklo_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2); +} + +/// Unpacks the lower 32 bits from two 64-bit integer vectors of +/// [2 x i32] and interleaves them into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PUNPCKLDQ instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to +/// the lower 32 bits of the result. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to +/// the upper 32 bits of the result. +/// \returns A 64-bit integer vector of [2 x i32] containing the interleaved +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_unpacklo_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2); +} + +/// Adds each 8-bit integer element of the first 64-bit integer vector +/// of [8 x i8] to the corresponding 8-bit integer element of the second +/// 64-bit integer vector of [8 x i8]. The lower 8 bits of the results are +/// packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the sums of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_add_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Adds each 16-bit integer element of the first 64-bit integer vector +/// of [4 x i16] to the corresponding 16-bit integer element of the second +/// 64-bit integer vector of [4 x i16]. 
The lower 16 bits of the results are +/// packed into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the sums of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_add_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Adds each 32-bit integer element of the first 64-bit integer vector +/// of [2 x i32] to the corresponding 32-bit integer element of the second +/// 64-bit integer vector of [2 x i32]. The lower 32 bits of the results are +/// packed into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. +/// \returns A 64-bit integer vector of [2 x i32] containing the sums of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_add_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2); +} + +/// Adds, with saturation, each 8-bit signed integer element of the first +/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit signed +/// integer element of the second 64-bit integer vector of [8 x i8]. +/// +/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums +/// less than 0x80 are saturated to 0x80. The results are packed into a +/// 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated sums +/// of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_adds_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Adds, with saturation, each 16-bit signed integer element of the first +/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit signed +/// integer element of the second 64-bit integer vector of [4 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. The results are packed into a +/// 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated sums +/// of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_adds_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Adds, with saturation, each 8-bit unsigned integer element of the first +/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit unsigned +/// integer element of the second 64-bit integer vector of [8 x i8]. +/// +/// Sums greater than 0xFF are saturated to 0xFF. The results are packed +/// into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDUSB instruction. 
+/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated +/// unsigned sums of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_adds_pu8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Adds, with saturation, each 16-bit unsigned integer element of the first +/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit unsigned +/// integer element of the second 64-bit integer vector of [4 x i16]. +/// +/// Sums greater than 0xFFFF are saturated to 0xFFFF. The results are packed +/// into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PADDUSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated +/// unsigned sums of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_adds_pu16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Subtracts each 8-bit integer element of the second 64-bit integer +/// vector of [8 x i8] from the corresponding 8-bit integer element of the +/// first 64-bit integer vector of [8 x i8]. The lower 8 bits of the results +/// are packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8] containing the subtrahends. +/// \returns A 64-bit integer vector of [8 x i8] containing the differences of +/// both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sub_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Subtracts each 16-bit integer element of the second 64-bit integer +/// vector of [4 x i16] from the corresponding 16-bit integer element of the +/// first 64-bit integer vector of [4 x i16]. The lower 16 bits of the +/// results are packed into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16] containing the subtrahends. +/// \returns A 64-bit integer vector of [4 x i16] containing the differences of +/// both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sub_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Subtracts each 32-bit integer element of the second 64-bit integer +/// vector of [2 x i32] from the corresponding 32-bit integer element of the +/// first 64-bit integer vector of [2 x i32]. The lower 32 bits of the +/// results are packed into a 64-bit integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32] containing the subtrahends. +/// \returns A 64-bit integer vector of [2 x i32] containing the differences of +/// both parameters. 
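A hypothetical side-by-side of wrapping versus saturating byte addition with the intrinsics above; the values are illustrative.

static void adds_demo(void) {
  __m64 a = _mm_cvtsi64_m64((long long)0xF0F0F0F0F0F0F0F0ULL); /* every byte 0xF0 */
  __m64 b = _mm_cvtsi64_m64(0x2020202020202020LL);             /* every byte 0x20 */
  __m64 wrap = _mm_add_pi8(a, b);   /* 0xF0 + 0x20 wraps to 0x10 in each byte */
  __m64 sat  = _mm_adds_pu8(a, b);  /* saturates to 0xFF in each byte */
  (void)wrap;
  (void)sat;
  _mm_empty();
}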
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sub_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2); +} + +/// Subtracts, with saturation, each 8-bit signed integer element of the second +/// 64-bit integer vector of [8 x i8] from the corresponding 8-bit signed +/// integer element of the first 64-bit integer vector of [8 x i8]. +/// +/// Positive results greater than 0x7F are saturated to 0x7F. Negative +/// results less than 0x80 are saturated to 0x80. The results are packed +/// into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8] containing the subtrahends. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated +/// differences of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_subs_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Subtracts, with saturation, each 16-bit signed integer element of the +/// second 64-bit integer vector of [4 x i16] from the corresponding 16-bit +/// signed integer element of the first 64-bit integer vector of [4 x i16]. +/// +/// Positive results greater than 0x7FFF are saturated to 0x7FFF. Negative +/// results less than 0x8000 are saturated to 0x8000. The results are packed +/// into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBSW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16] containing the subtrahends. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated +/// differences of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_subs_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Subtracts each 8-bit unsigned integer element of the second 64-bit +/// integer vector of [8 x i8] from the corresponding 8-bit unsigned integer +/// element of the first 64-bit integer vector of [8 x i8]. +/// +/// If an element of the first vector is less than the corresponding element +/// of the second vector, the result is saturated to 0. The results are +/// packed into a 64-bit integer vector of [8 x i8]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBUSB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8] containing the subtrahends. +/// \returns A 64-bit integer vector of [8 x i8] containing the saturated +/// differences of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_subs_pu8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Subtracts each 16-bit unsigned integer element of the second 64-bit +/// integer vector of [4 x i16] from the corresponding 16-bit unsigned +/// integer element of the first 64-bit integer vector of [4 x i16]. +/// +/// If an element of the first vector is less than the corresponding element +/// of the second vector, the result is saturated to 0. The results are +/// packed into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSUBUSW instruction. 
+/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16] containing the minuends. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16] containing the subtrahends. +/// \returns A 64-bit integer vector of [4 x i16] containing the saturated +/// differences of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_subs_pu16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Multiplies each 16-bit signed integer element of the first 64-bit +/// integer vector of [4 x i16] by the corresponding 16-bit signed integer +/// element of the second 64-bit integer vector of [4 x i16] and get four +/// 32-bit products. Adds adjacent pairs of products to get two 32-bit sums. +/// The lower 32 bits of these two sums are packed into a 64-bit integer +/// vector of [2 x i32]. +/// +/// For example, bits [15:0] of both parameters are multiplied, bits [31:16] +/// of both parameters are multiplied, and the sum of both results is written +/// to bits [31:0] of the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMADDWD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [2 x i32] containing the sums of +/// products of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_madd_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2); +} + +/// Multiplies each 16-bit signed integer element of the first 64-bit +/// integer vector of [4 x i16] by the corresponding 16-bit signed integer +/// element of the second 64-bit integer vector of [4 x i16]. Packs the upper +/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULHW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the upper 16 bits +/// of the products of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_mulhi_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Multiplies each 16-bit signed integer element of the first 64-bit +/// integer vector of [4 x i16] by the corresponding 16-bit signed integer +/// element of the second 64-bit integer vector of [4 x i16]. Packs the lower +/// 16 bits of the 32-bit products into a 64-bit integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULLW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits +/// of the products of both parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_mullo_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Left-shifts each 16-bit signed integer element of the first +/// parameter, which is a 64-bit integer vector of [4 x i16], by the number +/// of bits specified by the second parameter, which is a 64-bit integer. The +/// lower 16 bits of the results are packed into a 64-bit integer vector of +/// [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLW instruction. 
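A sketch of a four-element i16 dot product built on the _mm_madd_pi16 intrinsic above; the helper name dot4 is illustrative.

static int dot4(__m64 a, __m64 b) {
  __m64 p = _mm_madd_pi16(a, b);    /* two i32 sums of adjacent 16-bit products */
  long long v = _mm_cvtm64_si64(p);
  _mm_empty();
  return (int)v + (int)(v >> 32);   /* fold the two 32-bit partial sums */
}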
+/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted +/// values. If \a __count is greater or equal to 16, the result is set to all +/// 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sll_pi16(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count); +} + +/// Left-shifts each 16-bit signed integer element of a 64-bit integer +/// vector of [4 x i16] by the number of bits specified by a 32-bit integer. +/// The lower 16 bits of the results are packed into a 64-bit integer vector +/// of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted +/// values. If \a __count is greater or equal to 16, the result is set to all +/// 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_slli_pi16(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count); +} + +/// Left-shifts each 32-bit signed integer element of the first +/// parameter, which is a 64-bit integer vector of [2 x i32], by the number +/// of bits specified by the second parameter, which is a 64-bit integer. The +/// lower 32 bits of the results are packed into a 64-bit integer vector of +/// [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted +/// values. If \a __count is greater or equal to 32, the result is set to all +/// 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sll_pi32(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_pslld((__v2si)__m, __count); +} + +/// Left-shifts each 32-bit signed integer element of a 64-bit integer +/// vector of [2 x i32] by the number of bits specified by a 32-bit integer. +/// The lower 32 bits of the results are packed into a 64-bit integer vector +/// of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted +/// values. If \a __count is greater or equal to 32, the result is set to all +/// 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_slli_pi32(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count); +} + +/// Left-shifts the first 64-bit integer parameter by the number of bits +/// specified by the second 64-bit integer parameter. The lower 64 bits of +/// result are returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector containing the left-shifted value. If +/// \a __count is greater or equal to 64, the result is set to 0. 
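Two illustrative uses of the immediate-count left shifts above, including the documented all-zero result for an out-of-range count; the helper names are made up.

static __m64 times_eight(__m64 v) {
  return _mm_slli_pi16(v, 3);       /* each i16 lane shifted left by 3, i.e. multiplied by 8 */
}
static __m64 always_zero(__m64 v) {
  return _mm_slli_pi16(v, 16);      /* a count >= 16 clears every lane */
}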
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sll_si64(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psllq((__v1di)__m, __count); +} + +/// Left-shifts the first parameter, which is a 64-bit integer, by the +/// number of bits specified by the second parameter, which is a 32-bit +/// integer. The lower 64 bits of result are returned. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSLLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector containing the left-shifted value. If +/// \a __count is greater or equal to 64, the result is set to 0. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_slli_si64(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psllqi((__v1di)__m, __count); +} + +/// Right-shifts each 16-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [4 x i16], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. +/// +/// High-order bits are filled with the sign bit of the initial value of each +/// 16-bit element. The 16-bit results are packed into a 64-bit integer +/// vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sra_pi16(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count); +} + +/// Right-shifts each 16-bit integer element of a 64-bit integer vector +/// of [4 x i16] by the number of bits specified by a 32-bit integer. +/// +/// High-order bits are filled with the sign bit of the initial value of each +/// 16-bit element. The 16-bit results are packed into a 64-bit integer +/// vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srai_pi16(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count); +} + +/// Right-shifts each 32-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [2 x i32], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. +/// +/// High-order bits are filled with the sign bit of the initial value of each +/// 32-bit element. The 32-bit results are packed into a 64-bit integer +/// vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_sra_pi32(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psrad((__v2si)__m, __count); +} + +/// Right-shifts each 32-bit integer element of a 64-bit integer vector +/// of [2 x i32] by the number of bits specified by a 32-bit integer. 
+/// +/// High-order bits are filled with the sign bit of the initial value of each +/// 32-bit element. The 32-bit results are packed into a 64-bit integer +/// vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRAD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srai_pi32(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psradi((__v2si)__m, __count); +} + +/// Right-shifts each 16-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [4 x i16], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. +/// +/// High-order bits are cleared. The 16-bit results are packed into a 64-bit +/// integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srl_pi16(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count); +} + +/// Right-shifts each 16-bit integer element of a 64-bit integer vector +/// of [4 x i16] by the number of bits specified by a 32-bit integer. +/// +/// High-order bits are cleared. The 16-bit results are packed into a 64-bit +/// integer vector of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLW instruction. +/// +/// \param __m +/// A 64-bit integer vector of [4 x i16]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [4 x i16] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srli_pi16(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count); +} + +/// Right-shifts each 32-bit integer element of the first parameter, +/// which is a 64-bit integer vector of [2 x i32], by the number of bits +/// specified by the second parameter, which is a 64-bit integer. +/// +/// High-order bits are cleared. The 32-bit results are packed into a 64-bit +/// integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srl_pi32(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psrld((__v2si)__m, __count); +} + +/// Right-shifts each 32-bit integer element of a 64-bit integer vector +/// of [2 x i32] by the number of bits specified by a 32-bit integer. +/// +/// High-order bits are cleared. The 32-bit results are packed into a 64-bit +/// integer vector of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLD instruction. +/// +/// \param __m +/// A 64-bit integer vector of [2 x i32]. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector of [2 x i32] containing the right-shifted +/// values. 
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srli_pi32(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count); +} + +/// Right-shifts the first 64-bit integer parameter by the number of bits +/// specified by the second 64-bit integer parameter. +/// +/// High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \returns A 64-bit integer vector containing the right-shifted value. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srl_si64(__m64 __m, __m64 __count) +{ + return (__m64)__builtin_ia32_psrlq((__v1di)__m, __count); +} + +/// Right-shifts the first parameter, which is a 64-bit integer, by the +/// number of bits specified by the second parameter, which is a 32-bit +/// integer. +/// +/// High-order bits are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSRLQ instruction. +/// +/// \param __m +/// A 64-bit integer vector interpreted as a single 64-bit integer. +/// \param __count +/// A 32-bit integer value. +/// \returns A 64-bit integer vector containing the right-shifted value. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_srli_si64(__m64 __m, int __count) +{ + return (__m64)__builtin_ia32_psrlqi((__v1di)__m, __count); +} + +/// Performs a bitwise AND of two 64-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PAND instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise AND of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_and_si64(__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pand((__v1di)__m1, (__v1di)__m2); +} + +/// Performs a bitwise NOT of the first 64-bit integer vector, and then +/// performs a bitwise AND of the intermediate result and the second 64-bit +/// integer vector. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PANDN instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. The one's complement of this parameter is used +/// in the bitwise AND. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise AND of the second +/// parameter and the one's complement of the first parameter. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_andnot_si64(__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pandn((__v1di)__m1, (__v1di)__m2); +} + +/// Performs a bitwise OR of two 64-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POR instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise OR of both +/// parameters. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_or_si64(__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_por((__v1di)__m1, (__v1di)__m2); +} + +/// Performs a bitwise exclusive OR of two 64-bit integer vectors. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PXOR instruction. +/// +/// \param __m1 +/// A 64-bit integer vector. +/// \param __m2 +/// A 64-bit integer vector. +/// \returns A 64-bit integer vector containing the bitwise exclusive OR of both +/// parameters. 
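Taken together, PAND/PANDN/POR implement the usual branch-free select on __m64 values. A small sketch (the helper name is made up for illustration; compile with MMX enabled):

#include <immintrin.h>

/* Pick bits of a where mask is 1 and bits of b where mask is 0. */
static __m64 select_si64(__m64 mask, __m64 a, __m64 b) {
  return _mm_or_si64(_mm_and_si64(mask, a),       /* a AND mask       */
                     _mm_andnot_si64(mask, b));   /* b AND (NOT mask) */
}

A comparison intrinsic such as _mm_cmpgt_pi16 below produces exactly this kind of all-ones/all-zeros per-element mask.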
+static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_xor_si64(__m64 __m1, __m64 __m2) +{ + return __builtin_ia32_pxor((__v1di)__m1, (__v1di)__m2); +} + +/// Compares the 8-bit integer elements of two 64-bit integer vectors of +/// [8 x i8] to determine if the element of the first vector is equal to the +/// corresponding element of the second vector. +/// +/// Each comparison returns 0 for false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPEQB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpeq_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Compares the 16-bit integer elements of two 64-bit integer vectors of +/// [4 x i16] to determine if the element of the first vector is equal to the +/// corresponding element of the second vector. +/// +/// Each comparison returns 0 for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPEQW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. +/// \returns A 64-bit integer vector of [4 x i16] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpeq_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Compares the 32-bit integer elements of two 64-bit integer vectors of +/// [2 x i32] to determine if the element of the first vector is equal to the +/// corresponding element of the second vector. +/// +/// Each comparison returns 0 for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPEQD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. +/// \returns A 64-bit integer vector of [2 x i32] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpeq_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2); +} + +/// Compares the 8-bit integer elements of two 64-bit integer vectors of +/// [8 x i8] to determine if the element of the first vector is greater than +/// the corresponding element of the second vector. +/// +/// Each comparison returns 0 for false, 0xFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPGTB instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [8 x i8]. +/// \param __m2 +/// A 64-bit integer vector of [8 x i8]. +/// \returns A 64-bit integer vector of [8 x i8] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpgt_pi8(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2); +} + +/// Compares the 16-bit integer elements of two 64-bit integer vectors of +/// [4 x i16] to determine if the element of the first vector is greater than +/// the corresponding element of the second vector. +/// +/// Each comparison returns 0 for false, 0xFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPGTW instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [4 x i16]. +/// \param __m2 +/// A 64-bit integer vector of [4 x i16]. 
+/// \returns A 64-bit integer vector of [4 x i16] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpgt_pi16(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2); +} + +/// Compares the 32-bit integer elements of two 64-bit integer vectors of +/// [2 x i32] to determine if the element of the first vector is greater than +/// the corresponding element of the second vector. +/// +/// Each comparison returns 0 for false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PCMPGTD instruction. +/// +/// \param __m1 +/// A 64-bit integer vector of [2 x i32]. +/// \param __m2 +/// A 64-bit integer vector of [2 x i32]. +/// \returns A 64-bit integer vector of [2 x i32] containing the comparison +/// results. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_cmpgt_pi32(__m64 __m1, __m64 __m2) +{ + return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2); +} + +/// Constructs a 64-bit integer vector initialized to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PXOR instruction. +/// +/// \returns An initialized 64-bit integer vector with all elements set to zero. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_setzero_si64(void) +{ + return __extension__ (__m64){ 0LL }; +} + +/// Constructs a 64-bit integer vector initialized with the specified +/// 32-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i1 +/// A 32-bit integer value used to initialize the upper 32 bits of the +/// result. +/// \param __i0 +/// A 32-bit integer value used to initialize the lower 32 bits of the +/// result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set_pi32(int __i1, int __i0) +{ + return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1); +} + +/// Constructs a 64-bit integer vector initialized with the specified +/// 16-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __s3 +/// A 16-bit integer value used to initialize bits [63:48] of the result. +/// \param __s2 +/// A 16-bit integer value used to initialize bits [47:32] of the result. +/// \param __s1 +/// A 16-bit integer value used to initialize bits [31:16] of the result. +/// \param __s0 +/// A 16-bit integer value used to initialize bits [15:0] of the result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set_pi16(short __s3, short __s2, short __s1, short __s0) +{ + return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3); +} + +/// Constructs a 64-bit integer vector initialized with the specified +/// 8-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b7 +/// An 8-bit integer value used to initialize bits [63:56] of the result. +/// \param __b6 +/// An 8-bit integer value used to initialize bits [55:48] of the result. +/// \param __b5 +/// An 8-bit integer value used to initialize bits [47:40] of the result. +/// \param __b4 +/// An 8-bit integer value used to initialize bits [39:32] of the result. +/// \param __b3 +/// An 8-bit integer value used to initialize bits [31:24] of the result. 
+/// \param __b2 +/// An 8-bit integer value used to initialize bits [23:16] of the result. +/// \param __b1 +/// An 8-bit integer value used to initialize bits [15:8] of the result. +/// \param __b0 +/// An 8-bit integer value used to initialize bits [7:0] of the result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, + char __b1, char __b0) +{ + return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3, + __b4, __b5, __b6, __b7); +} + +/// Constructs a 64-bit integer vector of [2 x i32], with each of the +/// 32-bit integer vector elements set to the specified 32-bit integer +/// value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i +/// A 32-bit integer value used to initialize each vector element of the +/// result. +/// \returns An initialized 64-bit integer vector of [2 x i32]. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set1_pi32(int __i) +{ + return _mm_set_pi32(__i, __i); +} + +/// Constructs a 64-bit integer vector of [4 x i16], with each of the +/// 16-bit integer vector elements set to the specified 16-bit integer +/// value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w +/// A 16-bit integer value used to initialize each vector element of the +/// result. +/// \returns An initialized 64-bit integer vector of [4 x i16]. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set1_pi16(short __w) +{ + return _mm_set_pi16(__w, __w, __w, __w); +} + +/// Constructs a 64-bit integer vector of [8 x i8], with each of the +/// 8-bit integer vector elements set to the specified 8-bit integer value. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b +/// An 8-bit integer value used to initialize each vector element of the +/// result. +/// \returns An initialized 64-bit integer vector of [8 x i8]. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_set1_pi8(char __b) +{ + return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b); +} + +/// Constructs a 64-bit integer vector, initialized in reverse order with +/// the specified 32-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __i0 +/// A 32-bit integer value used to initialize the lower 32 bits of the +/// result. +/// \param __i1 +/// A 32-bit integer value used to initialize the upper 32 bits of the +/// result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_setr_pi32(int __i0, int __i1) +{ + return _mm_set_pi32(__i1, __i0); +} + +/// Constructs a 64-bit integer vector, initialized in reverse order with +/// the specified 16-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __w0 +/// A 16-bit integer value used to initialize bits [15:0] of the result. +/// \param __w1 +/// A 16-bit integer value used to initialize bits [31:16] of the result. +/// \param __w2 +/// A 16-bit integer value used to initialize bits [47:32] of the result. +/// \param __w3 +/// A 16-bit integer value used to initialize bits [63:48] of the result. 
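The _mm_set_* constructors take their arguments highest element first, while the _mm_setr_* variants take them in memory order (lowest element first), so the two calls in this sketch build the same vector; illustrative only:

#include <immintrin.h>

static void demo_element_order(void) {
  __m64 a = _mm_set_pi32(2, 1);   /* __i1 = 2 -> bits [63:32], __i0 = 1 -> bits [31:0] */
  __m64 b = _mm_setr_pi32(1, 2);  /* same layout, arguments listed low to high */
  (void)a; (void)b;
  _mm_empty();
}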
+/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_setr_pi16(short __w0, short __w1, short __w2, short __w3) +{ + return _mm_set_pi16(__w3, __w2, __w1, __w0); +} + +/// Constructs a 64-bit integer vector, initialized in reverse order with +/// the specified 8-bit integer values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __b0 +/// An 8-bit integer value used to initialize bits [7:0] of the result. +/// \param __b1 +/// An 8-bit integer value used to initialize bits [15:8] of the result. +/// \param __b2 +/// An 8-bit integer value used to initialize bits [23:16] of the result. +/// \param __b3 +/// An 8-bit integer value used to initialize bits [31:24] of the result. +/// \param __b4 +/// An 8-bit integer value used to initialize bits [39:32] of the result. +/// \param __b5 +/// An 8-bit integer value used to initialize bits [47:40] of the result. +/// \param __b6 +/// An 8-bit integer value used to initialize bits [55:48] of the result. +/// \param __b7 +/// An 8-bit integer value used to initialize bits [63:56] of the result. +/// \returns An initialized 64-bit integer vector. +static __inline__ __m64 __DEFAULT_FN_ATTRS +_mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, + char __b6, char __b7) +{ + return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0); +} + +#undef __DEFAULT_FN_ATTRS + +/* Aliases for compatibility. */ +#define _m_empty _mm_empty +#define _m_from_int _mm_cvtsi32_si64 +#define _m_from_int64 _mm_cvtsi64_m64 +#define _m_to_int _mm_cvtsi64_si32 +#define _m_to_int64 _mm_cvtm64_si64 +#define _m_packsswb _mm_packs_pi16 +#define _m_packssdw _mm_packs_pi32 +#define _m_packuswb _mm_packs_pu16 +#define _m_punpckhbw _mm_unpackhi_pi8 +#define _m_punpckhwd _mm_unpackhi_pi16 +#define _m_punpckhdq _mm_unpackhi_pi32 +#define _m_punpcklbw _mm_unpacklo_pi8 +#define _m_punpcklwd _mm_unpacklo_pi16 +#define _m_punpckldq _mm_unpacklo_pi32 +#define _m_paddb _mm_add_pi8 +#define _m_paddw _mm_add_pi16 +#define _m_paddd _mm_add_pi32 +#define _m_paddsb _mm_adds_pi8 +#define _m_paddsw _mm_adds_pi16 +#define _m_paddusb _mm_adds_pu8 +#define _m_paddusw _mm_adds_pu16 +#define _m_psubb _mm_sub_pi8 +#define _m_psubw _mm_sub_pi16 +#define _m_psubd _mm_sub_pi32 +#define _m_psubsb _mm_subs_pi8 +#define _m_psubsw _mm_subs_pi16 +#define _m_psubusb _mm_subs_pu8 +#define _m_psubusw _mm_subs_pu16 +#define _m_pmaddwd _mm_madd_pi16 +#define _m_pmulhw _mm_mulhi_pi16 +#define _m_pmullw _mm_mullo_pi16 +#define _m_psllw _mm_sll_pi16 +#define _m_psllwi _mm_slli_pi16 +#define _m_pslld _mm_sll_pi32 +#define _m_pslldi _mm_slli_pi32 +#define _m_psllq _mm_sll_si64 +#define _m_psllqi _mm_slli_si64 +#define _m_psraw _mm_sra_pi16 +#define _m_psrawi _mm_srai_pi16 +#define _m_psrad _mm_sra_pi32 +#define _m_psradi _mm_srai_pi32 +#define _m_psrlw _mm_srl_pi16 +#define _m_psrlwi _mm_srli_pi16 +#define _m_psrld _mm_srl_pi32 +#define _m_psrldi _mm_srli_pi32 +#define _m_psrlq _mm_srl_si64 +#define _m_psrlqi _mm_srli_si64 +#define _m_pand _mm_and_si64 +#define _m_pandn _mm_andnot_si64 +#define _m_por _mm_or_si64 +#define _m_pxor _mm_xor_si64 +#define _m_pcmpeqb _mm_cmpeq_pi8 +#define _m_pcmpeqw _mm_cmpeq_pi16 +#define _m_pcmpeqd _mm_cmpeq_pi32 +#define _m_pcmpgtb _mm_cmpgt_pi8 +#define _m_pcmpgtw _mm_cmpgt_pi16 +#define _m_pcmpgtd _mm_cmpgt_pi32 + +#endif /* __MMINTRIN_H */ + diff --git a/third_party/intel/clang/movdirintrin.h 
b/third_party/intel/clang/movdirintrin.h new file mode 100644 index 000000000..30c4d02c8 --- /dev/null +++ b/third_party/intel/clang/movdirintrin.h @@ -0,0 +1,49 @@ +/*===------------------------- movdirintrin.h ------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef _MOVDIRINTRIN_H +#define _MOVDIRINTRIN_H + +/* Move doubleword as direct store */ +static __inline__ void +__attribute__((__always_inline__, __nodebug__, __target__("movdiri"))) +_directstoreu_u32 (void *__dst, unsigned int __value) +{ + __builtin_ia32_directstore_u32((unsigned int *)__dst, (unsigned int)__value); +} + +#ifdef __x86_64__ + +/* Move quadword as direct store */ +static __inline__ void +__attribute__((__always_inline__, __nodebug__, __target__("movdiri"))) +_directstoreu_u64 (void *__dst, unsigned long __value) +{ + __builtin_ia32_directstore_u64((unsigned long *)__dst, __value); +} + +#endif /* __x86_64__ */ + +/* + * movdir64b - Move 64 bytes as direct store. + * The destination must be 64 byte aligned, and the store is atomic. + * The source address has no alignment requirement, and the load from + * the source address is not atomic. + */ +static __inline__ void +__attribute__((__always_inline__, __nodebug__, __target__("movdir64b"))) +_movdir64b (void *__dst __attribute__((align_value(64))), const void *__src) +{ + __builtin_ia32_movdir64b(__dst, __src); +} + +#endif /* _MOVDIRINTRIN_H */ diff --git a/third_party/intel/clang/mwaitxintrin.h b/third_party/intel/clang/mwaitxintrin.h new file mode 100644 index 000000000..65f427105 --- /dev/null +++ b/third_party/intel/clang/mwaitxintrin.h @@ -0,0 +1,62 @@ +/*===---- mwaitxintrin.h - MONITORX/MWAITX intrinsics ----------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86INTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __MWAITXINTRIN_H +#define __MWAITXINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx"))) + +/// Establishes a linear address memory range to be monitored and puts +/// the processor in the monitor event pending state. Data stored in the +/// monitored address range causes the processor to exit the pending state. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MONITORX instruction. +/// +/// \param __p +/// The memory range to be monitored. The size of the range is determined by +/// CPUID function 0000_0005h. +/// \param __extensions +/// Optional extensions for the monitoring state. +/// \param __hints +/// Optional hints for the monitoring state. 
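A brief sketch for the MOVDIR64B intrinsic introduced in movdirintrin.h above: the destination must be 64-byte aligned and receives one atomic 64-byte store, while the source may be unaligned. The names here are hypothetical, and the code assumes -mmovdir64b (or an -march= setting that implies it):

#include <immintrin.h>
#include <stdalign.h>

static alignas(64) unsigned char portal[64];    /* stand-in for a 64-byte device doorbell */

static void post_descriptor(const void *desc)   /* desc points at 64 bytes, any alignment */
{
  _movdir64b(portal, desc);
}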
+static __inline__ void __DEFAULT_FN_ATTRS +_mm_monitorx(void * __p, unsigned __extensions, unsigned __hints) +{ + __builtin_ia32_monitorx(__p, __extensions, __hints); +} + +/// Used with the \c MONITORX instruction to wait while the processor is in +/// the monitor event pending state. Data stored in the monitored address +/// range, or an interrupt, causes the processor to exit the pending state. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MWAITX instruction. +/// +/// \param __extensions +/// Optional extensions for the monitoring state, which can vary by +/// processor. +/// \param __hints +/// Optional hints for the monitoring state, which can vary by processor. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock) +{ + __builtin_ia32_mwaitx(__extensions, __hints, __clock); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __MWAITXINTRIN_H */ diff --git a/third_party/intel/clang/nmmintrin.h b/third_party/intel/clang/nmmintrin.h new file mode 100644 index 000000000..d26d58eab --- /dev/null +++ b/third_party/intel/clang/nmmintrin.h @@ -0,0 +1,20 @@ +/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __NMMINTRIN_H +#define __NMMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +/* To match expectations of gcc we put the sse4.2 definitions into smmintrin.h, + just include it now then. */ +#include "smmintrin.h" +#endif /* __NMMINTRIN_H */ diff --git a/third_party/intel/clang/pconfigintrin.h b/third_party/intel/clang/pconfigintrin.h new file mode 100644 index 000000000..d2014b026 --- /dev/null +++ b/third_party/intel/clang/pconfigintrin.h @@ -0,0 +1,40 @@ +/*===---- pconfigintrin.h - X86 platform configuration ---------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __PCONFIGINTRIN_H +#define __PCONFIGINTRIN_H + +#define __PCONFIG_KEY_PROGRAM 0x00000001 + +#if __has_extension(gnu_asm) + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("pconfig"))) + +static __inline unsigned int __DEFAULT_FN_ATTRS +_pconfig_u32(unsigned int __leaf, __SIZE_TYPE__ __d[]) +{ + unsigned int __result; + __asm__ ("pconfig" + : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2]) + : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2]) + : "cc"); + return __result; +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __has_extension(gnu_asm) */ + +#endif diff --git a/third_party/intel/clang/pkuintrin.h b/third_party/intel/clang/pkuintrin.h new file mode 100644 index 000000000..c62080bec --- /dev/null +++ b/third_party/intel/clang/pkuintrin.h @@ -0,0 +1,34 @@ +/*===---- pkuintrin.h - PKU intrinsics -------------------------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __PKUINTRIN_H +#define __PKUINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("pku"))) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_rdpkru_u32(void) +{ + return __builtin_ia32_rdpkru(); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_wrpkru(unsigned int __val) +{ + __builtin_ia32_wrpkru(__val); +} + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/pmmintrin.h b/third_party/intel/clang/pmmintrin.h new file mode 100644 index 000000000..6414e9e0c --- /dev/null +++ b/third_party/intel/clang/pmmintrin.h @@ -0,0 +1,301 @@ +/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __PMMINTRIN_H +#define __PMMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +#include "emmintrin.h" + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("sse3,no-evex512"), __min_vector_width__(128))) + +/// Loads data from an unaligned memory location to elements in a 128-bit +/// vector. +/// +/// If the address of the data is not 16-byte aligned, the instruction may +/// read two adjacent aligned blocks of memory to retrieve the requested +/// data. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VLDDQU instruction. +/// +/// \param __p +/// A pointer to a 128-bit integer vector containing integer values. +/// \returns A 128-bit vector containing the moved values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_lddqu_si128(__m128i_u const *__p) +{ + return (__m128i)__builtin_ia32_lddqu((char const *)__p); +} + +/// Adds the even-indexed values and subtracts the odd-indexed values of +/// two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPS instruction. 
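For the PKU intrinsics just above: each protection key owns two PKRU bits, AD at bit 2*key and WD at bit 2*key+1. A hedged sketch that revokes write access through a key the kernel has already handed out (for example via pkey_alloc() on Linux); requires -mpku:

#include <immintrin.h>

static void pkey_deny_writes(int pkey) {
  unsigned int pkru = _rdpkru_u32();
  pkru |= 1u << (2 * pkey + 1);   /* set WD: writes through this key now fault */
  _wrpkru(pkru);
}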
+/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the left source operand. +/// \param __b +/// A 128-bit vector of [4 x float] containing the right source operand. +/// \returns A 128-bit vector of [4 x float] containing the alternating sums and +/// differences of both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_addsub_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_addsubps((__v4sf)__a, (__v4sf)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in two +/// 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [4 x float] containing the horizontal sums of +/// both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_hadd_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in two +/// 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The horizontal differences between the values are stored in the lower +/// bits of the destination. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The horizontal differences between the values are stored in the upper +/// bits of the destination. +/// \returns A 128-bit vector of [4 x float] containing the horizontal +/// differences of both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_hsub_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b); +} + +/// Moves and duplicates odd-indexed values from a 128-bit vector +/// of [4 x float] to float values stored in a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSHDUP instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. \n +/// Bits [127:96] of the source are written to bits [127:96] and [95:64] of +/// the destination. \n +/// Bits [63:32] of the source are written to bits [63:32] and [31:0] of the +/// destination. +/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated +/// values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_movehdup_ps(__m128 __a) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3); +} + +/// Duplicates even-indexed values from a 128-bit vector of +/// [4 x float] to float values stored in a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSLDUP instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float] \n +/// Bits [95:64] of the source are written to bits [127:96] and [95:64] of +/// the destination. \n +/// Bits [31:0] of the source are written to bits [63:32] and [31:0] of the +/// destination. +/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated +/// values. 
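A common use of HADDPS is a full horizontal reduction: two rounds collapse a 128-bit vector of [4 x float] to a single sum. A minimal sketch (the helper name is illustrative; requires -msse3):

#include <immintrin.h>

static float hsum_ps(__m128 v) {
  __m128 t = _mm_hadd_ps(v, v);   /* {v0+v1, v2+v3, v0+v1, v2+v3} */
  t = _mm_hadd_ps(t, t);          /* every lane now holds v0+v1+v2+v3 */
  return _mm_cvtss_f32(t);
}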
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_moveldup_ps(__m128 __a) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 2, 2); +} + +/// Adds the even-indexed values and subtracts the odd-indexed values of +/// two 128-bit vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSUBPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing the left source operand. +/// \param __b +/// A 128-bit vector of [2 x double] containing the right source operand. +/// \returns A 128-bit vector of [2 x double] containing the alternating sums +/// and differences of both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_addsub_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b); +} + +/// Horizontally adds the pairs of values contained in two 128-bit +/// vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHADDPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// The horizontal sum of the values is stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// The horizontal sum of the values is stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [2 x double] containing the horizontal sums of +/// both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_hadd_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b); +} + +/// Horizontally subtracts the pairs of values contained in two 128-bit +/// vectors of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VHSUBPD instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// The horizontal difference of the values is stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [2 x double] containing one of the source operands. +/// The horizontal difference of the values is stored in the upper bits of +/// the destination. +/// \returns A 128-bit vector of [2 x double] containing the horizontal +/// differences of both operands. +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_hsub_pd(__m128d __a, __m128d __b) +{ + return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b); +} + +/// Moves and duplicates one double-precision value to double-precision +/// values stored in a 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_loaddup_pd(double const *dp); +/// \endcode +/// +/// This intrinsic corresponds to the VMOVDDUP instruction. +/// +/// \param dp +/// A pointer to a double-precision value to be moved and duplicated. +/// \returns A 128-bit vector of [2 x double] containing the moved and +/// duplicated values. +#define _mm_loaddup_pd(dp) _mm_load1_pd(dp) + +/// Moves and duplicates the double-precision value in the lower bits of +/// a 128-bit vector of [2 x double] to double-precision values stored in a +/// 128-bit vector of [2 x double]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVDDUP instruction. +/// +/// \param __a +/// A 128-bit vector of [2 x double]. Bits [63:0] are written to bits +/// [127:64] and [63:0] of the destination. +/// \returns A 128-bit vector of [2 x double] containing the moved and +/// duplicated values. 
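MOVDDUP and ADDSUBPD combine naturally in the textbook SSE3 complex multiply, where the lower lane needs a subtraction and the upper lane an addition. A sketch under the assumption that each __m128d packs one complex number as {real, imag}; requires -msse3:

#include <immintrin.h>

static __m128d cmul(__m128d a, __m128d b) {
  __m128d ar = _mm_movedup_pd(a);                        /* {a.re, a.re} */
  __m128d ai = _mm_unpackhi_pd(a, a);                    /* {a.im, a.im} */
  __m128d t  = _mm_mul_pd(ar, b);                        /* {a.re*b.re, a.re*b.im} */
  __m128d u  = _mm_mul_pd(ai, _mm_shuffle_pd(b, b, 1));  /* {a.im*b.im, a.im*b.re} */
  return _mm_addsub_pd(t, u);   /* {re*re - im*im, re*im + im*re} */
}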
+static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_movedup_pd(__m128d __a) +{ + return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0); +} + +/// Establishes a linear address memory range to be monitored and puts +/// the processor in the monitor event pending state. Data stored in the +/// monitored address range causes the processor to exit the pending state. +/// +/// The \c MONITOR instruction can be used in kernel mode, and in other modes +/// if MSR C001_0015h[MonMwaitUserEn] is set. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MONITOR instruction. +/// +/// \param __p +/// The memory range to be monitored. The size of the range is determined by +/// CPUID function 0000_0005h. +/// \param __extensions +/// Optional extensions for the monitoring state. +/// \param __hints +/// Optional hints for the monitoring state. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_monitor(void const *__p, unsigned __extensions, unsigned __hints) +{ + __builtin_ia32_monitor(__p, __extensions, __hints); +} + +/// Used with the \c MONITOR instruction to wait while the processor is in +/// the monitor event pending state. Data stored in the monitored address +/// range, or an interrupt, causes the processor to exit the pending state. +/// +/// The \c MWAIT instruction can be used in kernel mode, and in other modes if +/// MSR C001_0015h[MonMwaitUserEn] is set. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c MWAIT instruction. +/// +/// \param __extensions +/// Optional extensions for the monitoring state, which can vary by +/// processor. +/// \param __hints +/// Optional hints for the monitoring state, which can vary by processor. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_mwait(unsigned __extensions, unsigned __hints) +{ + __builtin_ia32_mwait(__extensions, __hints); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __PMMINTRIN_H */ diff --git a/third_party/intel/clang/popcntintrin.h b/third_party/intel/clang/popcntintrin.h new file mode 100644 index 000000000..0aa94aecd --- /dev/null +++ b/third_party/intel/clang/popcntintrin.h @@ -0,0 +1,59 @@ +/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __POPCNTINTRIN_H +#define __POPCNTINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt"))) + +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr +#else +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS +#endif + +/// Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POPCNT instruction. +/// +/// \param __A +/// An unsigned 32-bit integer operand. +/// \returns A 32-bit integer containing the number of bits with value 1 in the +/// source operand. +static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_popcnt_u32(unsigned int __A) +{ + return __builtin_popcount(__A); +} + +#ifdef __x86_64__ +/// Counts the number of bits in the source operand having a value of 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the POPCNT instruction. 
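A typical use of the POPCNT intrinsics is counting set bits across a bitmap word by word. A short sketch (requires -mpopcnt; the function name is made up):

#include <immintrin.h>

static unsigned long long count_bits(const unsigned int *words, unsigned n) {
  unsigned long long total = 0;
  for (unsigned i = 0; i < n; i++)
    total += (unsigned long long)_mm_popcnt_u32(words[i]);
  return total;
}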
+/// +/// \param __A +/// An unsigned 64-bit integer operand. +/// \returns A 64-bit integer containing the number of bits with value 1 in the +/// source operand. +static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_popcnt_u64(unsigned long long __A) +{ + return __builtin_popcountll(__A); +} +#endif /* __x86_64__ */ + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_CONSTEXPR + +#endif /* __POPCNTINTRIN_H */ diff --git a/third_party/intel/clang/prfchiintrin.h b/third_party/intel/clang/prfchiintrin.h new file mode 100644 index 000000000..36600b25a --- /dev/null +++ b/third_party/intel/clang/prfchiintrin.h @@ -0,0 +1,61 @@ +/*===---- prfchiintrin.h - PREFETCHI intrinsic -----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __PRFCHIINTRIN_H +#define __PRFCHIINTRIN_H + +#ifdef __x86_64__ + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("prefetchi"))) + +/// Loads an instruction sequence containing the specified memory address into +/// all level cache. +/// +/// Note that the effect of this intrinsic is dependent on the processor +/// implementation. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PREFETCHIT0 instruction. +/// +/// \param __P +/// A pointer specifying the memory address to be prefetched. +static __inline__ void __DEFAULT_FN_ATTRS +_m_prefetchit0(volatile const void *__P) { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-qual" + __builtin_ia32_prefetchi((const void *)__P, 3 /* _MM_HINT_T0 */); +#pragma clang diagnostic pop +} + +/// Loads an instruction sequence containing the specified memory address into +/// all but the first-level cache. +/// +/// Note that the effect of this intrinsic is dependent on the processor +/// implementation. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PREFETCHIT1 instruction. +/// +/// \param __P +/// A pointer specifying the memory address to be prefetched. +static __inline__ void __DEFAULT_FN_ATTRS +_m_prefetchit1(volatile const void *__P) { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-qual" + __builtin_ia32_prefetchi((const void *)__P, 2 /* _MM_HINT_T1 */); +#pragma clang diagnostic pop +} +#endif /* __x86_64__ */ +#undef __DEFAULT_FN_ATTRS + +#endif /* __PRFCHWINTRIN_H */ diff --git a/third_party/intel/clang/prfchwintrin.h b/third_party/intel/clang/prfchwintrin.h new file mode 100644 index 000000000..eaea5f3cf --- /dev/null +++ b/third_party/intel/clang/prfchwintrin.h @@ -0,0 +1,60 @@ +/*===---- prfchwintrin.h - PREFETCHW intrinsic -----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED) +#error "Never use directly; include instead." +#endif + +#ifndef __PRFCHWINTRIN_H +#define __PRFCHWINTRIN_H + +/// Loads a memory sequence containing the specified memory address into +/// all data cache levels. 
+/// +/// The cache-coherency state is set to exclusive. Data can be read from +/// and written to the cache line without additional delay. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PREFETCHT0 instruction. +/// +/// \param __P +/// A pointer specifying the memory address to be prefetched. +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_m_prefetch(void *__P) +{ + __builtin_prefetch (__P, 0, 3 /* _MM_HINT_T0 */); +} + +/// Loads a memory sequence containing the specified memory address into +/// the L1 data cache and sets the cache-coherency state to modified. +/// +/// This provides a hint to the processor that the cache line will be +/// modified. It is intended for use when the cache line will be written to +/// shortly after the prefetch is performed. +/// +/// Note that the effect of this intrinsic is dependent on the processor +/// implementation. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PREFETCHW instruction. +/// +/// \param __P +/// A pointer specifying the memory address to be prefetched. +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_m_prefetchw(volatile const void *__P) +{ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-qual" + __builtin_prefetch ((const void*)__P, 1, 3 /* _MM_HINT_T0 */); +#pragma clang diagnostic pop +} + +#endif /* __PRFCHWINTRIN_H */ diff --git a/third_party/intel/clang/ptwriteintrin.h b/third_party/intel/clang/ptwriteintrin.h new file mode 100644 index 000000000..0a04f7c1d --- /dev/null +++ b/third_party/intel/clang/ptwriteintrin.h @@ -0,0 +1,37 @@ +/*===------------ ptwriteintrin.h - PTWRITE intrinsic --------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __PTWRITEINTRIN_H +#define __PTWRITEINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("ptwrite"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_ptwrite32(unsigned int __value) { + __builtin_ia32_ptwrite32(__value); +} + +#ifdef __x86_64__ + +static __inline__ void __DEFAULT_FN_ATTRS +_ptwrite64(unsigned long long __value) { + __builtin_ia32_ptwrite64(__value); +} + +#endif /* __x86_64__ */ + +#undef __DEFAULT_FN_ATTRS + +#endif /* __PTWRITEINTRIN_H */ diff --git a/third_party/intel/clang/raointintrin.h b/third_party/intel/clang/raointintrin.h new file mode 100644 index 000000000..d3290eb62 --- /dev/null +++ b/third_party/intel/clang/raointintrin.h @@ -0,0 +1,203 @@ +/*===----------------------- raointintrin.h - RAOINT ------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86GPRINTRIN_H +#error "Never use directly; include instead." 
+#endif // __X86GPRINTRIN_H + +#ifndef __RAOINTINTRIN_H +#define __RAOINTINTRIN_H + +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("raoint"))) + +/// Atomically add a 32-bit value at memory operand \a __A and a 32-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AADD instruction. +/// +/// \param __A +/// A pointer to a 32-bit memory location. +/// \param __B +/// A 32-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+31:__A] := MEM[__A+31:__A] + __B[31:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aadd_i32(int *__A, int __B) { + __builtin_ia32_aadd32((int *)__A, __B); +} + +/// Atomically and a 32-bit value at memory operand \a __A and a 32-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AAND instruction. +/// +/// \param __A +/// A pointer to a 32-bit memory location. +/// \param __B +/// A 32-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+31:__A] := MEM[__A+31:__A] AND __B[31:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aand_i32(int *__A, int __B) { + __builtin_ia32_aand32((int *)__A, __B); +} + +/// Atomically or a 32-bit value at memory operand \a __A and a 32-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AOR instruction. +/// +/// \param __A +/// A pointer to a 32-bit memory location. +/// \param __B +/// A 32-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+31:__A] := MEM[__A+31:__A] OR __B[31:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aor_i32(int *__A, int __B) { + __builtin_ia32_aor32((int *)__A, __B); +} + +/// Atomically xor a 32-bit value at memory operand \a __A and a 32-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AXOR instruction. +/// +/// \param __A +/// A pointer to a 32-bit memory location. +/// \param __B +/// A 32-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+31:__A] := MEM[__A+31:__A] XOR __B[31:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _axor_i32(int *__A, int __B) { + __builtin_ia32_axor32((int *)__A, __B); +} + +#ifdef __x86_64__ +/// Atomically add a 64-bit value at memory operand \a __A and a 64-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AADD instruction. +/// +/// \param __A +/// A pointer to a 64-bit memory location. +/// \param __B +/// A 64-bit integer value. 
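The RAO-INT operations above target contended, weakly ordered read-modify-write traffic such as shared statistics counters. A hedged sketch (the counter and its sharing discipline are hypothetical; requires -mraoint, and the header is normally reached through <x86gprintrin.h> or <immintrin.h>):

#include <immintrin.h>

static void count_event(int *shared_counter) {
  _aadd_i32(shared_counter, 1);   /* atomic add intended for contended counters */
}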
+/// +/// \code{.operation} +/// MEM[__A+63:__A] := MEM[__A+63:__A] + __B[63:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aadd_i64(long long *__A, + long long __B) { + __builtin_ia32_aadd64((long long *)__A, __B); +} + +/// Atomically and a 64-bit value at memory operand \a __A and a 64-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AAND instruction. +/// +/// \param __A +/// A pointer to a 64-bit memory location. +/// \param __B +/// A 64-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+63:__A] := MEM[__A+63:__A] AND __B[63:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aand_i64(long long *__A, + long long __B) { + __builtin_ia32_aand64((long long *)__A, __B); +} + +/// Atomically or a 64-bit value at memory operand \a __A and a 64-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AOR instruction. +/// +/// \param __A +/// A pointer to a 64-bit memory location. +/// \param __B +/// A 64-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+63:__A] := MEM[__A+63:__A] OR __B[63:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _aor_i64(long long *__A, + long long __B) { + __builtin_ia32_aor64((long long *)__A, __B); +} + +/// Atomically xor a 64-bit value at memory operand \a __A and a 64-bit \a __B, +/// and store the result to the same memory location. +/// +/// This intrinsic should be used for contention or weak ordering. It may +/// result in bad performance for hot data used by single thread only. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c AXOR instruction. +/// +/// \param __A +/// A pointer to a 64-bit memory location. +/// \param __B +/// A 64-bit integer value. +/// +/// \code{.operation} +/// MEM[__A+63:__A] := MEM[__A+63:__A] XOR __B[63:0] +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS _axor_i64(long long *__A, + long long __B) { + __builtin_ia32_axor64((long long *)__A, __B); +} +#endif // __x86_64__ + +#undef __DEFAULT_FN_ATTRS +#endif // __RAOINTINTRIN_H diff --git a/third_party/intel/clang/rdpruintrin.h b/third_party/intel/clang/rdpruintrin.h new file mode 100644 index 000000000..89732bb8b --- /dev/null +++ b/third_party/intel/clang/rdpruintrin.h @@ -0,0 +1,57 @@ +/*===---- rdpruintrin.h - RDPRU intrinsics ---------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __RDPRUINTRIN_H +#define __RDPRUINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("rdpru"))) + + +/// Reads the content of a processor register. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the RDPRU instruction. 
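The RDPRU intrinsics defined just below expose MPERF/APERF from user mode on supporting AMD processors; the ratio of their deltas over an interval approximates effective versus base frequency. An illustrative sketch (requires -mrdpru; the measured workload is elided):

#include <x86intrin.h>

static double effective_freq_ratio(void) {
  unsigned long long m0 = __mperf(), a0 = __aperf();
  /* ... run the workload being observed ... */
  unsigned long long m1 = __mperf(), a1 = __aperf();
  return (double)(a1 - a0) / (double)(m1 - m0);
}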
+/// +/// \param reg_id +/// A processor register identifier. +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__rdpru (int reg_id) +{ + return __builtin_ia32_rdpru(reg_id); +} + +#define __RDPRU_MPERF 0 +#define __RDPRU_APERF 1 + +/// Reads the content of processor register MPERF. +/// +/// \headerfile +/// +/// This intrinsic generates instruction RDPRU to read the value of +/// register MPERF. +#define __mperf() __builtin_ia32_rdpru(__RDPRU_MPERF) + +/// Reads the content of processor register APERF. +/// +/// \headerfile +/// +/// This intrinsic generates instruction RDPRU to read the value of +/// register APERF. +#define __aperf() __builtin_ia32_rdpru(__RDPRU_APERF) + +#undef __DEFAULT_FN_ATTRS + +#endif /* __RDPRUINTRIN_H */ diff --git a/third_party/intel/clang/rdseedintrin.h b/third_party/intel/clang/rdseedintrin.h new file mode 100644 index 000000000..8a4fe0930 --- /dev/null +++ b/third_party/intel/clang/rdseedintrin.h @@ -0,0 +1,105 @@ +/*===---- rdseedintrin.h - RDSEED intrinsics -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __RDSEEDINTRIN_H +#define __RDSEEDINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed"))) + +/// Stores a hardware-generated 16-bit random value in the memory at \a __p. +/// +/// The random number generator complies with NIST SP800-90B and SP800-90C. +/// +/// \code{.operation} +/// IF HW_NRND_GEN.ready == 1 +/// Store16(__p, HW_NRND_GEN.data) +/// result := 1 +/// ELSE +/// Store16(__p, 0) +/// result := 0 +/// END +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c RDSEED instruction. +/// +/// \param __p +/// Pointer to memory for storing the 16-bit random number. +/// \returns 1 if a random number was generated, 0 if not. +static __inline__ int __DEFAULT_FN_ATTRS +_rdseed16_step(unsigned short *__p) +{ + return (int) __builtin_ia32_rdseed16_step(__p); +} + +/// Stores a hardware-generated 32-bit random value in the memory at \a __p. +/// +/// The random number generator complies with NIST SP800-90B and SP800-90C. +/// +/// \code{.operation} +/// IF HW_NRND_GEN.ready == 1 +/// Store32(__p, HW_NRND_GEN.data) +/// result := 1 +/// ELSE +/// Store32(__p, 0) +/// result := 0 +/// END +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c RDSEED instruction. +/// +/// \param __p +/// Pointer to memory for storing the 32-bit random number. +/// \returns 1 if a random number was generated, 0 if not. +static __inline__ int __DEFAULT_FN_ATTRS +_rdseed32_step(unsigned int *__p) +{ + return (int) __builtin_ia32_rdseed32_step(__p); +} + +#ifdef __x86_64__ +/// Stores a hardware-generated 64-bit random value in the memory at \a __p. +/// +/// The random number generator complies with NIST SP800-90B and SP800-90C. +/// +/// \code{.operation} +/// IF HW_NRND_GEN.ready == 1 +/// Store64(__p, HW_NRND_GEN.data) +/// result := 1 +/// ELSE +/// Store64(__p, 0) +/// result := 0 +/// END +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c RDSEED instruction. 
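RDSEED reports failure when conditioned entropy is not yet available, so the *_step intrinsics above are normally wrapped in a retry loop. A sketch (the retry count and helper name are arbitrary; requires -mrdseed):

#include <immintrin.h>

static int rdseed32_retry(unsigned int *out, int attempts) {
  while (attempts-- > 0) {
    if (_rdseed32_step(out))
      return 1;      /* success: *out holds a hardware-seeded value */
    _mm_pause();     /* brief backoff before trying again */
  }
  return 0;          /* caller should fall back to another entropy source */
}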
+/// +/// \param __p +/// Pointer to memory for storing the 64-bit random number. +/// \returns 1 if a random number was generated, 0 if not. +static __inline__ int __DEFAULT_FN_ATTRS +_rdseed64_step(unsigned long long *__p) +{ + return (int) __builtin_ia32_rdseed64_step(__p); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif /* __RDSEEDINTRIN_H */ diff --git a/third_party/intel/clang/rtmintrin.h b/third_party/intel/clang/rtmintrin.h new file mode 100644 index 000000000..a3ec81e3f --- /dev/null +++ b/third_party/intel/clang/rtmintrin.h @@ -0,0 +1,45 @@ +/*===---- rtmintrin.h - RTM intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __RTMINTRIN_H +#define __RTMINTRIN_H + +#define _XBEGIN_STARTED (~0u) +#define _XABORT_EXPLICIT (1 << 0) +#define _XABORT_RETRY (1 << 1) +#define _XABORT_CONFLICT (1 << 2) +#define _XABORT_CAPACITY (1 << 3) +#define _XABORT_DEBUG (1 << 4) +#define _XABORT_NESTED (1 << 5) +#define _XABORT_CODE(x) (((x) >> 24) & 0xFF) + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rtm"))) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +_xbegin(void) +{ + return (unsigned int)__builtin_ia32_xbegin(); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xend(void) +{ + __builtin_ia32_xend(); +} + +#define _xabort(imm) __builtin_ia32_xabort((imm)) + +#undef __DEFAULT_FN_ATTRS + +#endif /* __RTMINTRIN_H */ diff --git a/third_party/intel/clang/serializeintrin.h b/third_party/intel/clang/serializeintrin.h new file mode 100644 index 000000000..b774e5a24 --- /dev/null +++ b/third_party/intel/clang/serializeintrin.h @@ -0,0 +1,30 @@ +/*===--------------- serializeintrin.h - serialize intrinsics --------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __SERIALIZEINTRIN_H +#define __SERIALIZEINTRIN_H + +/// Serialize instruction fetch and execution. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the SERIALIZE instruction. +/// +static __inline__ void +__attribute__((__always_inline__, __nodebug__, __target__("serialize"))) +_serialize (void) +{ + __builtin_ia32_serialize (); +} + +#endif /* __SERIALIZEINTRIN_H */ diff --git a/third_party/intel/clang/sgxintrin.h b/third_party/intel/clang/sgxintrin.h new file mode 100644 index 000000000..303a21f6b --- /dev/null +++ b/third_party/intel/clang/sgxintrin.h @@ -0,0 +1,60 @@ +/*===---- sgxintrin.h - X86 SGX intrinsics configuration -------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __SGXINTRIN_H +#define __SGXINTRIN_H + +#if __has_extension(gnu_asm) + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("sgx"))) + +static __inline unsigned int __DEFAULT_FN_ATTRS +_enclu_u32(unsigned int __leaf, __SIZE_TYPE__ __d[]) +{ + unsigned int __result; + __asm__ ("enclu" + : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2]) + : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2]) + : "cc"); + return __result; +} + +static __inline unsigned int __DEFAULT_FN_ATTRS +_encls_u32(unsigned int __leaf, __SIZE_TYPE__ __d[]) +{ + unsigned int __result; + __asm__ ("encls" + : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2]) + : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2]) + : "cc"); + return __result; +} + +static __inline unsigned int __DEFAULT_FN_ATTRS +_enclv_u32(unsigned int __leaf, __SIZE_TYPE__ __d[]) +{ + unsigned int __result; + __asm__ ("enclv" + : "=a" (__result), "=b" (__d[0]), "=c" (__d[1]), "=d" (__d[2]) + : "a" (__leaf), "b" (__d[0]), "c" (__d[1]), "d" (__d[2]) + : "cc"); + return __result; +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __has_extension(gnu_asm) */ + +#endif diff --git a/third_party/intel/clang/sha512intrin.h b/third_party/intel/clang/sha512intrin.h new file mode 100644 index 000000000..065ef5dac --- /dev/null +++ b/third_party/intel/clang/sha512intrin.h @@ -0,0 +1,200 @@ +/*===--------------- sha512intrin.h - SHA512 intrinsics -----------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifndef __SHA512INTRIN_H +#define __SHA512INTRIN_H + +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("sha512"), \ + __min_vector_width__(256))) + +/// This intrinisc is one of the two SHA512 message scheduling instructions. +/// The intrinsic performs an intermediate calculation for the next four +/// SHA512 message qwords. The calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_sha512msg1_epi64(__m256i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSHA512MSG1 instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x long long]. +/// \param __B +/// A 128-bit vector of [2 x long long]. +/// \returns +/// A 256-bit vector of [4 x long long]. 
+/// +/// \code{.operation} +/// DEFINE ROR64(qword, n) { +/// count := n % 64 +/// dest := (qword >> count) | (qword << (64 - count)) +/// RETURN dest +/// } +/// DEFINE SHR64(qword, n) { +/// RETURN qword >> n +/// } +/// DEFINE s0(qword): +/// RETURN ROR64(qword,1) ^ ROR64(qword, 8) ^ SHR64(qword, 7) +/// } +/// W[4] := __B.qword[0] +/// W[3] := __A.qword[3] +/// W[2] := __A.qword[2] +/// W[1] := __A.qword[1] +/// W[0] := __A.qword[0] +/// dst.qword[3] := W[3] + s0(W[4]) +/// dst.qword[2] := W[2] + s0(W[3]) +/// dst.qword[1] := W[1] + s0(W[2]) +/// dst.qword[0] := W[0] + s0(W[1]) +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sha512msg1_epi64(__m256i __A, __m128i __B) { + return (__m256i)__builtin_ia32_vsha512msg1((__v4du)__A, (__v2du)__B); +} + +/// This intrinisc is one of the two SHA512 message scheduling instructions. +/// The intrinsic performs the final calculation for the next four SHA512 +/// message qwords. The calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_sha512msg2_epi64(__m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSHA512MSG2 instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x long long]. +/// \param __B +/// A 256-bit vector of [4 x long long]. +/// \returns +/// A 256-bit vector of [4 x long long]. +/// +/// \code{.operation} +/// DEFINE ROR64(qword, n) { +/// count := n % 64 +/// dest := (qword >> count) | (qword << (64 - count)) +/// RETURN dest +/// } +/// DEFINE SHR64(qword, n) { +/// RETURN qword >> n +/// } +/// DEFINE s1(qword) { +/// RETURN ROR64(qword,19) ^ ROR64(qword, 61) ^ SHR64(qword, 6) +/// } +/// W[14] := __B.qword[2] +/// W[15] := __B.qword[3] +/// W[16] := __A.qword[0] + s1(W[14]) +/// W[17] := __A.qword[1] + s1(W[15]) +/// W[18] := __A.qword[2] + s1(W[16]) +/// W[19] := __A.qword[3] + s1(W[17]) +/// dst.qword[3] := W[19] +/// dst.qword[2] := W[18] +/// dst.qword[1] := W[17] +/// dst.qword[0] := W[16] +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sha512msg2_epi64(__m256i __A, __m256i __B) { + return (__m256i)__builtin_ia32_vsha512msg2((__v4du)__A, (__v4du)__B); +} + +/// This intrinisc performs two rounds of SHA512 operation using initial SHA512 +/// state (C,D,G,H) from \a __A, an initial SHA512 state (A,B,E,F) from +/// \a __A, and a pre-computed sum of the next two round message qwords and +/// the corresponding round constants from \a __C (only the two lower qwords +/// of the third operand). The updated SHA512 state (A,B,E,F) is written to +/// \a __A, and \a __A can be used as the updated state (C,D,G,H) in later +/// rounds. +/// +/// \headerfile +/// +/// \code +/// __m256i _mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSHA512RNDS2 instruction. +/// +/// \param __A +/// A 256-bit vector of [4 x long long]. +/// \param __B +/// A 256-bit vector of [4 x long long]. +/// \param __C +/// A 128-bit vector of [2 x long long]. +/// \returns +/// A 256-bit vector of [4 x long long]. 
+/// +/// \code{.operation} +/// DEFINE ROR64(qword, n) { +/// count := n % 64 +/// dest := (qword >> count) | (qword << (64 - count)) +/// RETURN dest +/// } +/// DEFINE SHR64(qword, n) { +/// RETURN qword >> n +/// } +/// DEFINE cap_sigma0(qword) { +/// RETURN ROR64(qword,28) ^ ROR64(qword, 34) ^ ROR64(qword, 39) +/// } +/// DEFINE cap_sigma1(qword) { +/// RETURN ROR64(qword,14) ^ ROR64(qword, 18) ^ ROR64(qword, 41) +/// } +/// DEFINE MAJ(a,b,c) { +/// RETURN (a & b) ^ (a & c) ^ (b & c) +/// } +/// DEFINE CH(e,f,g) { +/// RETURN (e & f) ^ (g & ~e) +/// } +/// A[0] := __B.qword[3] +/// B[0] := __B.qword[2] +/// C[0] := __C.qword[3] +/// D[0] := __C.qword[2] +/// E[0] := __B.qword[1] +/// F[0] := __B.qword[0] +/// G[0] := __C.qword[1] +/// H[0] := __C.qword[0] +/// WK[0]:= __A.qword[0] +/// WK[1]:= __A.qword[1] +/// FOR i := 0 to 1: +/// A[i+1] := CH(E[i], F[i], G[i]) + +/// cap_sigma1(E[i]) + WK[i] + H[i] + +/// MAJ(A[i], B[i], C[i]) + +/// cap_sigma0(A[i]) +/// B[i+1] := A[i] +/// C[i+1] := B[i] +/// D[i+1] := C[i] +/// E[i+1] := CH(E[i], F[i], G[i]) + +/// cap_sigma1(E[i]) + WK[i] + H[i] + D[i] +/// F[i+1] := E[i] +/// G[i+1] := F[i] +/// H[i+1] := G[i] +/// ENDFOR +/// dst.qword[3] := A[2] +/// dst.qword[2] := B[2] +/// dst.qword[1] := E[2] +/// dst.qword[0] := F[2] +/// dst[MAX:256] := 0 +/// \endcode +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C) { + return (__m256i)__builtin_ia32_vsha512rnds2((__v4du)__A, (__v4du)__B, + (__v2du)__C); +} + +#undef __DEFAULT_FN_ATTRS256 + +#endif // __SHA512INTRIN_H diff --git a/third_party/intel/clang/shaintrin.h b/third_party/intel/clang/shaintrin.h new file mode 100644 index 000000000..232e1fa29 --- /dev/null +++ b/third_party/intel/clang/shaintrin.h @@ -0,0 +1,189 @@ +/*===---- shaintrin.h - SHA intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __SHAINTRIN_H +#define __SHAINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"), __min_vector_width__(128))) + +/// Performs four iterations of the inner loop of the SHA-1 message digest +/// algorithm using the starting SHA-1 state (A, B, C, D) from the 128-bit +/// vector of [4 x i32] in \a V1 and the next four 32-bit elements of the +/// message from the 128-bit vector of [4 x i32] in \a V2. Note that the +/// SHA-1 state variable E must have already been added to \a V2 +/// (\c _mm_sha1nexte_epu32() can perform this step). Returns the updated +/// SHA-1 state (A, B, C, D) as a 128-bit vector of [4 x i32]. +/// +/// The SHA-1 algorithm has an inner loop of 80 iterations, twenty each +/// with a different combining function and rounding constant. This +/// intrinsic performs four iterations using a combining function and +/// rounding constant selected by \a M[1:0]. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_sha1rnds4_epu32(__m128i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the \c SHA1RNDS4 instruction. 
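+///
+/// A minimal usage sketch (illustrative only; \c abcd and \c msg are
+/// hypothetical variables, and \c msg already has state variable E folded
+/// in as described above; assumes SHA support, e.g. \c -msha):
+///
+/// \code
+/// abcd = _mm_sha1rnds4_epu32(abcd, msg, 0);   /* four rounds, function/constant set 0 */
+/// \endcode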
+/// +/// \param V1 +/// A 128-bit vector of [4 x i32] containing the initial SHA-1 state. +/// \param V2 +/// A 128-bit vector of [4 x i32] containing the next four elements of +/// the message, plus SHA-1 state variable E. +/// \param M +/// An immediate value where bits [1:0] select among four possible +/// combining functions and rounding constants (not specified here). +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state. +#define _mm_sha1rnds4_epu32(V1, V2, M) \ + __builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M)) + +/// Calculates the SHA-1 state variable E from the SHA-1 state variables in +/// the 128-bit vector of [4 x i32] in \a __X, adds that to the next set of +/// four message elements in the 128-bit vector of [4 x i32] in \a __Y, and +/// returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA1NEXTE instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing the current SHA-1 state. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing the next four elements of the +/// message. +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 +/// values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha1nexte_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y); +} + +/// Performs an intermediate calculation for deriving the next four SHA-1 +/// message elements using previous message elements from the 128-bit +/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA1MSG1 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing previous message elements. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing previous message elements. +/// \returns A 128-bit vector of [4 x i32] containing the derived SHA-1 +/// elements. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha1msg1_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y); +} + +/// Performs the final calculation for deriving the next four SHA-1 message +/// elements using previous message elements from the 128-bit vectors of +/// [4 x i32] in \a __X and \a __Y, and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA1MSG2 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing an intermediate result. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing previous message values. +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 +/// values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha1msg2_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y); +} + +/// Performs two rounds of SHA-256 operation using the following inputs: a +/// starting SHA-256 state (C, D, G, H) from the 128-bit vector of +/// [4 x i32] in \a __X; a starting SHA-256 state (A, B, E, F) from the +/// 128-bit vector of [4 x i32] in \a __Y; and a pre-computed sum of the +/// next two message elements (unsigned 32-bit integers) and corresponding +/// rounding constants from the 128-bit vector of [4 x i32] in \a __Z. +/// Returns the updated SHA-256 state (A, B, E, F) as a 128-bit vector of +/// [4 x i32]. +/// +/// The SHA-256 algorithm has a core loop of 64 iterations. This intrinsic +/// performs two of those iterations. 
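+///
+/// A minimal usage sketch (illustrative only; \c state0 and \c state1 are
+/// hypothetical vectors holding the two packed state halves, and \c msg_k
+/// holds four message words already summed with their round constants):
+///
+/// \code
+/// state1 = _mm_sha256rnds2_epu32(state1, state0, msg_k);   /* rounds i, i+1   */
+/// msg_k  = _mm_shuffle_epi32(msg_k, 0x0E);                 /* next word pair  */
+/// state0 = _mm_sha256rnds2_epu32(state0, state1, msg_k);   /* rounds i+2, i+3 */
+/// \endcode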
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA256RNDS2 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256 +/// state. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256 +/// state. +/// \param __Z +/// A 128-bit vector of [4 x i32] containing additional input to the +/// SHA-256 operation. +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z) +{ + return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z); +} + +/// Performs an intermediate calculation for deriving the next four SHA-256 +/// message elements using previous message elements from the 128-bit +/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA256MSG1 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing previous message elements. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing previous message elements. +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256 +/// values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha256msg1_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y); +} + +/// Performs the final calculation for deriving the next four SHA-256 message +/// elements using previous message elements from the 128-bit vectors of +/// [4 x i32] in \a __X and \a __Y, and returns the result. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c SHA256MSG2 instruction. +/// +/// \param __X +/// A 128-bit vector of [4 x i32] containing an intermediate result. +/// \param __Y +/// A 128-bit vector of [4 x i32] containing previous message values. +/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256 +/// values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha256msg2_epu32(__m128i __X, __m128i __Y) +{ + return (__m128i)__builtin_ia32_sha256msg2((__v4si)__X, (__v4si)__Y); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __SHAINTRIN_H */ diff --git a/third_party/intel/clang/sm3intrin.h b/third_party/intel/clang/sm3intrin.h new file mode 100644 index 000000000..8a3d8bc9e --- /dev/null +++ b/third_party/intel/clang/sm3intrin.h @@ -0,0 +1,238 @@ +/*===-------------------- sm3intrin.h - SM3 intrinsics ---------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifndef __SM3INTRIN_H +#define __SM3INTRIN_H + +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("sm3"), \ + __min_vector_width__(128))) + +/// This intrinisc is one of the two SM3 message scheduling intrinsics. The +/// intrinsic performs an initial calculation for the next four SM3 message +/// words. The calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_sm3msg1_epi32(__m128i __A, __m128i __B, __m128i __C) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM3MSG1 instruction. 
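+///
+/// A minimal usage sketch (illustrative only; \c wa through \c we are
+/// hypothetical vectors of earlier SM3 message words, each arranged
+/// according to the W-index layouts in the operation blocks of the two
+/// scheduling intrinsics):
+///
+/// \code
+/// __m128i tmp  = _mm_sm3msg1_epi32(wa, wb, wc);   /* initial calculation     */
+/// __m128i next = _mm_sm3msg2_epi32(tmp, wd, we);  /* next four message words */
+/// \endcode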
+/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \param __C +/// A 128-bit vector of [4 x int]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32 - count)) +/// RETURN dest +/// } +/// DEFINE P1(x) { +/// RETURN x ^ ROL32(x, 15) ^ ROL32(x, 23) +/// } +/// W[0] := __C.dword[0] +/// W[1] := __C.dword[1] +/// W[2] := __C.dword[2] +/// W[3] := __C.dword[3] +/// W[7] := __A.dword[0] +/// W[8] := __A.dword[1] +/// W[9] := __A.dword[2] +/// W[10] := __A.dword[3] +/// W[13] := __B.dword[0] +/// W[14] := __B.dword[1] +/// W[15] := __B.dword[2] +/// TMP0 := W[7] ^ W[0] ^ ROL32(W[13], 15) +/// TMP1 := W[8] ^ W[1] ^ ROL32(W[14], 15) +/// TMP2 := W[9] ^ W[2] ^ ROL32(W[15], 15) +/// TMP3 := W[10] ^ W[3] +/// dst.dword[0] := P1(TMP0) +/// dst.dword[1] := P1(TMP1) +/// dst.dword[2] := P1(TMP2) +/// dst.dword[3] := P1(TMP3) +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg1_epi32(__m128i __A, + __m128i __B, + __m128i __C) { + return (__m128i)__builtin_ia32_vsm3msg1((__v4su)__A, (__v4su)__B, + (__v4su)__C); +} + +/// This intrinisc is one of the two SM3 message scheduling intrinsics. The +/// intrinsic performs the final calculation for the next four SM3 message +/// words. The calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_sm3msg2_epi32(__m128i __A, __m128i __B, __m128i __C) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM3MSG2 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \param __C +/// A 128-bit vector of [4 x int]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// WTMP[0] := __A.dword[0] +/// WTMP[1] := __A.dword[1] +/// WTMP[2] := __A.dword[2] +/// WTMP[3] := __A.dword[3] +/// W[3] := __B.dword[0] +/// W[4] := __B.dword[1] +/// W[5] := __B.dword[2] +/// W[6] := __B.dword[3] +/// W[10] := __C.dword[0] +/// W[11] := __C.dword[1] +/// W[12] := __C.dword[2] +/// W[13] := __C.dword[3] +/// W[16] := ROL32(W[3], 7) ^ W[10] ^ WTMP[0] +/// W[17] := ROL32(W[4], 7) ^ W[11] ^ WTMP[1] +/// W[18] := ROL32(W[5], 7) ^ W[12] ^ WTMP[2] +/// W[19] := ROL32(W[6], 7) ^ W[13] ^ WTMP[3] +/// W[19] := W[19] ^ ROL32(W[16], 6) ^ ROL32(W[16], 15) ^ ROL32(W[16], 30) +/// dst.dword[0] := W[16] +/// dst.dword[1] := W[17] +/// dst.dword[2] := W[18] +/// dst.dword[3] := W[19] +/// dst[MAX:128] := 0 +/// \endcode +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg2_epi32(__m128i __A, + __m128i __B, + __m128i __C) { + return (__m128i)__builtin_ia32_vsm3msg2((__v4su)__A, (__v4su)__B, + (__v4su)__C); +} + +/// This intrinsic performs two rounds of SM3 operation using initial SM3 state +/// (C, D, G, H) from \a __A, an initial SM3 states (A, B, E, F) +/// from \a __B and a pre-computed words from the \a __C. \a __A with +/// initial SM3 state of (C, D, G, H) assumes input of non-rotated left +/// variables from previous state. The updated SM3 state (A, B, E, F) is +/// written to \a __A. The \a imm8 should contain the even round number +/// for the first of the two rounds computed by this instruction. 
The +/// computation masks the \a imm8 value by AND’ing it with 0x3E so that only +/// even round numbers from 0 through 62 are used for this operation. The +/// calculated results are stored in \a dst. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_sm3rnds2_epi32(__m128i __A, __m128i __B, __m128i __C, const int +/// imm8) \endcode +/// +/// This intrinsic corresponds to the \c VSM3RNDS2 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \param __C +/// A 128-bit vector of [4 x int]. +/// \param imm8 +/// A 8-bit constant integer. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE P0(dword) { +/// RETURN dword ^ ROL32(dword, 9) ^ ROL32(dword, 17) +/// } +/// DEFINE FF(x,y,z, round){ +/// IF round < 16 +/// RETURN (x ^ y ^ z) +/// ELSE +/// RETURN (x & y) | (x & z) | (y & z) +/// FI +/// } +/// DEFINE GG(x, y, z, round){ +/// IF round < 16 +/// RETURN (x ^ y ^ z) +/// ELSE +/// RETURN (x & y) | (~x & z) +/// FI +/// } +/// A[0] := __B.dword[3] +/// B[0] := __B.dword[2] +/// C[0] := __A.dword[3] +/// D[0] := __A.dword[2] +/// E[0] := __B.dword[1] +/// F[0] := __B.dword[0] +/// G[0] := __A.dword[1] +/// H[0] := __A.dword[0] +/// W[0] := __C.dword[0] +/// W[1] := __C.dword[1] +/// W[4] := __C.dword[2] +/// W[5] := __C.dword[3] +/// C[0] := ROL32(C[0], 9) +/// D[0] := ROL32(D[0], 9) +/// G[0] := ROL32(G[0], 19) +/// H[0] := ROL32(H[0], 19) +/// ROUND := __D & 0x3E +/// IF ROUND < 16 +/// CONST := 0x79CC4519 +/// ELSE +/// CONST := 0x7A879D8A +/// FI +/// CONST := ROL32(CONST,ROUND) +/// FOR i:= 0 to 1 +/// S1 := ROL32((ROL32(A[i], 12) + E[i] + CONST), 7) +/// S2 := S1 ^ ROL32(A[i], 12) +/// T1 := FF(A[i], B[i], C[i], ROUND) + D[i] + S2 + (W[i] ^ W[i+4]) +/// T2 := GG(E[i], F[i], G[i], ROUND) + H[i] + S1 + W[i] +/// D[i+1] := C[i] +/// C[i+1] := ROL32(B[i],9) +/// B[i+1] := A[i] +/// A[i+1] := T1 +/// H[i+1] := G[i] +/// G[i+1] := ROL32(F[i], 19) +/// F[i+1] := E[i] +/// E[i+1] := P0(T2) +/// CONST := ROL32(CONST, 1) +/// ENDFOR +/// dst.dword[3] := A[2] +/// dst.dword[2] := B[2] +/// dst.dword[1] := E[2] +/// dst.dword[0] := F[2] +/// dst[MAX:128] := 0 +/// \endcode +#define _mm_sm3rnds2_epi32(A, B, C, D) \ + (__m128i) __builtin_ia32_vsm3rnds2((__v4su)A, (__v4su)B, (__v4su)C, (int)D) + +#undef __DEFAULT_FN_ATTRS128 + +#endif // __SM3INTRIN_H diff --git a/third_party/intel/clang/sm4intrin.h b/third_party/intel/clang/sm4intrin.h new file mode 100644 index 000000000..47aeec46a --- /dev/null +++ b/third_party/intel/clang/sm4intrin.h @@ -0,0 +1,269 @@ +/*===--------------- sm4intrin.h - SM4 intrinsics -----------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif // __IMMINTRIN_H + +#ifndef __SM4INTRIN_H +#define __SM4INTRIN_H + +/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic +/// operates on independent 128-bit lanes. The calculated results are +/// stored in \a dst. 
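+///
+/// A minimal usage sketch (illustrative only; \c prev and \c cks are
+/// hypothetical vectors holding four consecutive round keys and the four
+/// matching CK constants, with operand roles following the operation block
+/// below):
+///
+/// \code
+/// __m128i next = _mm_sm4key4_epi32(cks, prev);   /* next four round keys */
+/// \endcode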
+/// \headerfile +/// +/// \code +/// __m128i _mm_sm4key4_epi32(__m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM4KEY4 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE SBOX_BYTE(dword, i) { +/// RETURN sbox[dword.byte[i]] +/// } +/// DEFINE lower_t(dword) { +/// tmp.byte[0] := SBOX_BYTE(dword, 0) +/// tmp.byte[1] := SBOX_BYTE(dword, 1) +/// tmp.byte[2] := SBOX_BYTE(dword, 2) +/// tmp.byte[3] := SBOX_BYTE(dword, 3) +/// RETURN tmp +/// } +/// DEFINE L_KEY(dword) { +/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23) +/// } +/// DEFINE T_KEY(dword) { +/// RETURN L_KEY(lower_t(dword)) +/// } +/// DEFINE F_KEY(X0, X1, X2, X3, round_key) { +/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key) +/// } +/// FOR i:= 0 to 0 +/// P[0] := __B.xmm[i].dword[0] +/// P[1] := __B.xmm[i].dword[1] +/// P[2] := __B.xmm[i].dword[2] +/// P[3] := __B.xmm[i].dword[3] +/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0]) +/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1]) +/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2]) +/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3]) +/// DEST.xmm[i].dword[0] := C[0] +/// DEST.xmm[i].dword[1] := C[1] +/// DEST.xmm[i].dword[2] := C[2] +/// DEST.xmm[i].dword[3] := C[3] +/// ENDFOR +/// DEST[MAX:128] := 0 +/// \endcode +#define _mm_sm4key4_epi32(A, B) \ + (__m128i) __builtin_ia32_vsm4key4128((__v4su)A, (__v4su)B) + +/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic +/// operates on independent 128-bit lanes. The calculated results are +/// stored in \a dst. +/// \headerfile +/// +/// \code +/// __m256i _mm256_sm4key4_epi32(__m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM4KEY4 instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x int]. +/// \param __B +/// A 256-bit vector of [8 x int]. +/// \returns +/// A 256-bit vector of [8 x int]. 
+/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE SBOX_BYTE(dword, i) { +/// RETURN sbox[dword.byte[i]] +/// } +/// DEFINE lower_t(dword) { +/// tmp.byte[0] := SBOX_BYTE(dword, 0) +/// tmp.byte[1] := SBOX_BYTE(dword, 1) +/// tmp.byte[2] := SBOX_BYTE(dword, 2) +/// tmp.byte[3] := SBOX_BYTE(dword, 3) +/// RETURN tmp +/// } +/// DEFINE L_KEY(dword) { +/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23) +/// } +/// DEFINE T_KEY(dword) { +/// RETURN L_KEY(lower_t(dword)) +/// } +/// DEFINE F_KEY(X0, X1, X2, X3, round_key) { +/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key) +/// } +/// FOR i:= 0 to 1 +/// P[0] := __B.xmm[i].dword[0] +/// P[1] := __B.xmm[i].dword[1] +/// P[2] := __B.xmm[i].dword[2] +/// P[3] := __B.xmm[i].dword[3] +/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0]) +/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1]) +/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2]) +/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3]) +/// DEST.xmm[i].dword[0] := C[0] +/// DEST.xmm[i].dword[1] := C[1] +/// DEST.xmm[i].dword[2] := C[2] +/// DEST.xmm[i].dword[3] := C[3] +/// ENDFOR +/// DEST[MAX:256] := 0 +/// \endcode +#define _mm256_sm4key4_epi32(A, B) \ + (__m256i) __builtin_ia32_vsm4key4256((__v8su)A, (__v8su)B) + +/// This intrinisc performs four rounds of SM4 encryption. The intrinisc +/// operates on independent 128-bit lanes. The calculated results are +/// stored in \a dst. +/// \headerfile +/// +/// \code +/// __m128i _mm_sm4rnds4_epi32(__m128i __A, __m128i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM4RNDS4 instruction. +/// +/// \param __A +/// A 128-bit vector of [4 x int]. +/// \param __B +/// A 128-bit vector of [4 x int]. +/// \returns +/// A 128-bit vector of [4 x int]. +/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE lower_t(dword) { +/// tmp.byte[0] := SBOX_BYTE(dword, 0) +/// tmp.byte[1] := SBOX_BYTE(dword, 1) +/// tmp.byte[2] := SBOX_BYTE(dword, 2) +/// tmp.byte[3] := SBOX_BYTE(dword, 3) +/// RETURN tmp +/// } +/// DEFINE L_RND(dword) { +/// tmp := dword +/// tmp := tmp ^ ROL32(dword, 2) +/// tmp := tmp ^ ROL32(dword, 10) +/// tmp := tmp ^ ROL32(dword, 18) +/// tmp := tmp ^ ROL32(dword, 24) +/// RETURN tmp +/// } +/// DEFINE T_RND(dword) { +/// RETURN L_RND(lower_t(dword)) +/// } +/// DEFINE F_RND(X0, X1, X2, X3, round_key) { +/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key) +/// } +/// FOR i:= 0 to 0 +/// P[0] := __B.xmm[i].dword[0] +/// P[1] := __B.xmm[i].dword[1] +/// P[2] := __B.xmm[i].dword[2] +/// P[3] := __B.xmm[i].dword[3] +/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0]) +/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1]) +/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2]) +/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3]) +/// DEST.xmm[i].dword[0] := C[0] +/// DEST.xmm[i].dword[1] := C[1] +/// DEST.xmm[i].dword[2] := C[2] +/// DEST.xmm[i].dword[3] := C[3] +/// ENDFOR +/// DEST[MAX:128] := 0 +/// \endcode +#define _mm_sm4rnds4_epi32(A, B) \ + (__m128i) __builtin_ia32_vsm4rnds4128((__v4su)A, (__v4su)B) + +/// This intrinisc performs four rounds of SM4 encryption. The intrinisc +/// operates on independent 128-bit lanes. The calculated results are +/// stored in \a dst. 
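+///
+/// A minimal usage sketch (illustrative only; assumes AVX2 plus SM4 support;
+/// \c two_blocks and \c rk are hypothetical, with operand roles following
+/// the operation block below):
+///
+/// \code
+/// __m256i blocks = _mm256_loadu_si256((const __m256i *)two_blocks);
+/// __m256i rkeys  = _mm256_broadcastsi128_si256(rk);   /* rk[i..i+3] in both lanes  */
+/// blocks = _mm256_sm4rnds4_epi32(rkeys, blocks);      /* four rounds on two blocks */
+/// \endcode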
+/// \headerfile +/// +/// \code +/// __m256i _mm256_sm4rnds4_epi32(__m256i __A, __m256i __B) +/// \endcode +/// +/// This intrinsic corresponds to the \c VSM4RNDS4 instruction. +/// +/// \param __A +/// A 256-bit vector of [8 x int]. +/// \param __B +/// A 256-bit vector of [8 x int]. +/// \returns +/// A 256-bit vector of [8 x int]. +/// +/// \code{.operation} +/// DEFINE ROL32(dword, n) { +/// count := n % 32 +/// dest := (dword << count) | (dword >> (32-count)) +/// RETURN dest +/// } +/// DEFINE lower_t(dword) { +/// tmp.byte[0] := SBOX_BYTE(dword, 0) +/// tmp.byte[1] := SBOX_BYTE(dword, 1) +/// tmp.byte[2] := SBOX_BYTE(dword, 2) +/// tmp.byte[3] := SBOX_BYTE(dword, 3) +/// RETURN tmp +/// } +/// DEFINE L_RND(dword) { +/// tmp := dword +/// tmp := tmp ^ ROL32(dword, 2) +/// tmp := tmp ^ ROL32(dword, 10) +/// tmp := tmp ^ ROL32(dword, 18) +/// tmp := tmp ^ ROL32(dword, 24) +/// RETURN tmp +/// } +/// DEFINE T_RND(dword) { +/// RETURN L_RND(lower_t(dword)) +/// } +/// DEFINE F_RND(X0, X1, X2, X3, round_key) { +/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key) +/// } +/// FOR i:= 0 to 0 +/// P[0] := __B.xmm[i].dword[0] +/// P[1] := __B.xmm[i].dword[1] +/// P[2] := __B.xmm[i].dword[2] +/// P[3] := __B.xmm[i].dword[3] +/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0]) +/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1]) +/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2]) +/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3]) +/// DEST.xmm[i].dword[0] := C[0] +/// DEST.xmm[i].dword[1] := C[1] +/// DEST.xmm[i].dword[2] := C[2] +/// DEST.xmm[i].dword[3] := C[3] +/// ENDFOR +/// DEST[MAX:256] := 0 +/// \endcode +#define _mm256_sm4rnds4_epi32(A, B) \ + (__m256i) __builtin_ia32_vsm4rnds4256((__v8su)A, (__v8su)B) + +#endif // __SM4INTRIN_H diff --git a/third_party/intel/clang/smmintrin.h b/third_party/intel/clang/smmintrin.h new file mode 100644 index 000000000..6f7f586dc --- /dev/null +++ b/third_party/intel/clang/smmintrin.h @@ -0,0 +1,2328 @@ +/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __SMMINTRIN_H +#define __SMMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +#include "tmmintrin.h" + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("sse4.1,no-evex512"), __min_vector_width__(128))) + +/* SSE4 Rounding macros. 
*/ +#define _MM_FROUND_TO_NEAREST_INT 0x00 +#define _MM_FROUND_TO_NEG_INF 0x01 +#define _MM_FROUND_TO_POS_INF 0x02 +#define _MM_FROUND_TO_ZERO 0x03 +#define _MM_FROUND_CUR_DIRECTION 0x04 + +#define _MM_FROUND_RAISE_EXC 0x00 +#define _MM_FROUND_NO_EXC 0x08 + +#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT) +#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF) +#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF) +#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO) +#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION) +#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION) + +/// Rounds up each element of the 128-bit vector of [4 x float] to an +/// integer and returns the rounded values in a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_ceil_ps(__m128 X); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS / ROUNDPS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float] values to be rounded up. +/// \returns A 128-bit vector of [4 x float] containing the rounded values. +#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL) + +/// Rounds up each element of the 128-bit vector of [2 x double] to an +/// integer and returns the rounded values in a 128-bit vector of +/// [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_ceil_pd(__m128d X); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD / ROUNDPD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double] values to be rounded up. +/// \returns A 128-bit vector of [2 x double] containing the rounded values. +#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL) + +/// Copies three upper elements of the first 128-bit vector operand to +/// the corresponding three upper elements of the 128-bit result vector of +/// [4 x float]. Rounds up the lowest element of the second 128-bit vector +/// operand to an integer and copies it to the lowest element of the 128-bit +/// result vector of [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_ceil_ss(__m128 X, __m128 Y); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSS / ROUNDSS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [4 x float]. The value stored in bits [31:0] is +/// rounded up to the nearest integer and copied to the corresponding bits +/// of the result. +/// \returns A 128-bit vector of [4 x float] containing the copied and rounded +/// values. +#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL) + +/// Copies the upper element of the first 128-bit vector operand to the +/// corresponding upper element of the 128-bit result vector of [2 x double]. +/// Rounds up the lower element of the second 128-bit vector operand to an +/// integer and copies it to the lower element of the 128-bit result vector +/// of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_ceil_sd(__m128d X, __m128d Y); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSD / ROUNDSD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [2 x double]. 
The value stored in bits [63:0] is +/// rounded up to the nearest integer and copied to the corresponding bits +/// of the result. +/// \returns A 128-bit vector of [2 x double] containing the copied and rounded +/// values. +#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL) + +/// Rounds down each element of the 128-bit vector of [4 x float] to an +/// an integer and returns the rounded values in a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_floor_ps(__m128 X); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS / ROUNDPS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float] values to be rounded down. +/// \returns A 128-bit vector of [4 x float] containing the rounded values. +#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR) + +/// Rounds down each element of the 128-bit vector of [2 x double] to an +/// integer and returns the rounded values in a 128-bit vector of +/// [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_floor_pd(__m128d X); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD / ROUNDPD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. +/// \returns A 128-bit vector of [2 x double] containing the rounded values. +#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR) + +/// Copies three upper elements of the first 128-bit vector operand to +/// the corresponding three upper elements of the 128-bit result vector of +/// [4 x float]. Rounds down the lowest element of the second 128-bit vector +/// operand to an integer and copies it to the lowest element of the 128-bit +/// result vector of [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_floor_ss(__m128 X, __m128 Y); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSS / ROUNDSS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [4 x float]. The value stored in bits [31:0] is +/// rounded down to the nearest integer and copied to the corresponding bits +/// of the result. +/// \returns A 128-bit vector of [4 x float] containing the copied and rounded +/// values. +#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR) + +/// Copies the upper element of the first 128-bit vector operand to the +/// corresponding upper element of the 128-bit result vector of [2 x double]. +/// Rounds down the lower element of the second 128-bit vector operand to an +/// integer and copies it to the lower element of the 128-bit result vector +/// of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_floor_sd(__m128d X, __m128d Y); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSD / ROUNDSD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [2 x double]. The value stored in bits [63:0] is +/// rounded down to the nearest integer and copied to the corresponding bits +/// of the result. +/// \returns A 128-bit vector of [2 x double] containing the copied and rounded +/// values. 
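+///
+/// A minimal usage sketch (illustrative only): floor only the low lane while
+/// keeping the high lane unchanged.
+///
+/// \code
+/// __m128d x = _mm_set_pd(2.75, -1.25);   /* high = 2.75, low = -1.25               */
+/// __m128d r = _mm_floor_sd(x, x);        /* high = 2.75, low = floor(-1.25) = -2.0 */
+/// \endcode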
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR) + +/// Rounds each element of the 128-bit vector of [4 x float] to an +/// integer value according to the rounding control specified by the second +/// argument and returns the rounded values in a 128-bit vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_round_ps(__m128 X, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPS / ROUNDPS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used \n +/// 1: The PE field is not updated \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M \n +/// 1: Use the current MXCSR setting \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest \n +/// 01: Downward (toward negative infinity) \n +/// 10: Upward (toward positive infinity) \n +/// 11: Truncated +/// \returns A 128-bit vector of [4 x float] containing the rounded values. +#define _mm_round_ps(X, M) \ + ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M))) + +/// Copies three upper elements of the first 128-bit vector operand to +/// the corresponding three upper elements of the 128-bit result vector of +/// [4 x float]. Rounds the lowest element of the second 128-bit vector +/// operand to an integer value according to the rounding control specified +/// by the third argument and copies it to the lowest element of the 128-bit +/// result vector of [4 x float]. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_round_ss(__m128 X, __m128 Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSS / ROUNDSS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. The values stored in bits [127:32] are +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [4 x float]. The value stored in bits [31:0] is +/// rounded to the nearest integer using the specified rounding control and +/// copied to the corresponding bits of the result. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used \n +/// 1: The PE field is not updated \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M \n +/// 1: Use the current MXCSR setting \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest \n +/// 01: Downward (toward negative infinity) \n +/// 10: Upward (toward positive infinity) \n +/// 11: Truncated +/// \returns A 128-bit vector of [4 x float] containing the copied and rounded +/// values. +#define _mm_round_ss(X, Y, M) \ + ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \ + (M))) + +/// Rounds each element of the 128-bit vector of [2 x double] to an +/// integer value according to the rounding control specified by the second +/// argument and returns the rounded values in a 128-bit vector of +/// [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_round_pd(__m128d X, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDPD / ROUNDPD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. +/// \param M +/// An integer value that specifies the rounding operation. 
\n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used \n +/// 1: The PE field is not updated \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M \n +/// 1: Use the current MXCSR setting \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest \n +/// 01: Downward (toward negative infinity) \n +/// 10: Upward (toward positive infinity) \n +/// 11: Truncated +/// \returns A 128-bit vector of [2 x double] containing the rounded values. +#define _mm_round_pd(X, M) \ + ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M))) + +/// Copies the upper element of the first 128-bit vector operand to the +/// corresponding upper element of the 128-bit result vector of [2 x double]. +/// Rounds the lower element of the second 128-bit vector operand to an +/// integer value according to the rounding control specified by the third +/// argument and copies it to the lower element of the 128-bit result vector +/// of [2 x double]. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_round_sd(__m128d X, __m128d Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VROUNDSD / ROUNDSD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. The value stored in bits [127:64] is +/// copied to the corresponding bits of the result. +/// \param Y +/// A 128-bit vector of [2 x double]. The value stored in bits [63:0] is +/// rounded to the nearest integer using the specified rounding control and +/// copied to the corresponding bits of the result. +/// \param M +/// An integer value that specifies the rounding operation. \n +/// Bits [7:4] are reserved. \n +/// Bit [3] is a precision exception value: \n +/// 0: A normal PE exception is used \n +/// 1: The PE field is not updated \n +/// Bit [2] is the rounding control source: \n +/// 0: Use bits [1:0] of \a M \n +/// 1: Use the current MXCSR setting \n +/// Bits [1:0] contain the rounding control definition: \n +/// 00: Nearest \n +/// 01: Downward (toward negative infinity) \n +/// 10: Upward (toward positive infinity) \n +/// 11: Truncated +/// \returns A 128-bit vector of [2 x double] containing the copied and rounded +/// values. +#define _mm_round_sd(X, Y, M) \ + ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \ + (M))) + +/* SSE4 Packed Blending Intrinsics. */ +/// Returns a 128-bit vector of [2 x double] where the values are +/// selected from either the first or second operand as specified by the +/// third operand, the control mask. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_blend_pd(__m128d V1, __m128d V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPD / BLENDPD instruction. +/// +/// \param V1 +/// A 128-bit vector of [2 x double]. +/// \param V2 +/// A 128-bit vector of [2 x double]. +/// \param M +/// An immediate integer operand, with mask bits [1:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 64-bit +/// element in operand \a V1 is copied to the same position in the result. +/// When a mask bit is 1, the corresponding 64-bit element in operand \a V2 +/// is copied to the same position in the result. +/// \returns A 128-bit vector of [2 x double] containing the copied values. 
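+///
+/// A minimal usage sketch (illustrative only): keep the low element of the
+/// first operand and take the high element from the second.
+///
+/// \code
+/// __m128d a = _mm_set_pd(10.0, 1.0);    /* high = 10.0, low = 1.0 */
+/// __m128d b = _mm_set_pd(20.0, 2.0);    /* high = 20.0, low = 2.0 */
+/// __m128d r = _mm_blend_pd(a, b, 0x2);  /* high = 20.0, low = 1.0 */
+/// \endcode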
+#define _mm_blend_pd(V1, V2, M) \ + ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1), \ + (__v2df)(__m128d)(V2), (int)(M))) + +/// Returns a 128-bit vector of [4 x float] where the values are selected +/// from either the first or second operand as specified by the third +/// operand, the control mask. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_blend_ps(__m128 V1, __m128 V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VBLENDPS / BLENDPS instruction. +/// +/// \param V1 +/// A 128-bit vector of [4 x float]. +/// \param V2 +/// A 128-bit vector of [4 x float]. +/// \param M +/// An immediate integer operand, with mask bits [3:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 32-bit +/// element in operand \a V1 is copied to the same position in the result. +/// When a mask bit is 1, the corresponding 32-bit element in operand \a V2 +/// is copied to the same position in the result. +/// \returns A 128-bit vector of [4 x float] containing the copied values. +#define _mm_blend_ps(V1, V2, M) \ + ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \ + (int)(M))) + +/// Returns a 128-bit vector of [2 x double] where the values are +/// selected from either the first or second operand as specified by the +/// third operand, the control mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPD / BLENDVPD instruction. +/// +/// \param __V1 +/// A 128-bit vector of [2 x double]. +/// \param __V2 +/// A 128-bit vector of [2 x double]. +/// \param __M +/// A 128-bit vector operand, with mask bits 127 and 63 specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 64-bit element in operand \a __V1 is copied to the same +/// position in the result. When a mask bit is 1, the corresponding 64-bit +/// element in operand \a __V2 is copied to the same position in the result. +/// \returns A 128-bit vector of [2 x double] containing the copied values. +static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1, + __m128d __V2, + __m128d __M) { + return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2, + (__v2df)__M); +} + +/// Returns a 128-bit vector of [4 x float] where the values are +/// selected from either the first or second operand as specified by the +/// third operand, the control mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDVPS / BLENDVPS instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x float]. +/// \param __V2 +/// A 128-bit vector of [4 x float]. +/// \param __M +/// A 128-bit vector operand, with mask bits 127, 95, 63, and 31 specifying +/// how the values are to be copied. The position of the mask bit corresponds +/// to the most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 32-bit element in operand \a __V1 is copied to the same +/// position in the result. When a mask bit is 1, the corresponding 32-bit +/// element in operand \a __V2 is copied to the same position in the result. +/// \returns A 128-bit vector of [4 x float] containing the copied values. 
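+///
+/// A minimal usage sketch (illustrative only; \c a and \c b are hypothetical
+/// [4 x float] vectors): a per-element maximum built from a comparison mask.
+///
+/// \code
+/// __m128 mask = _mm_cmplt_ps(a, b);          /* all-ones lanes where a < b */
+/// __m128 vmax = _mm_blendv_ps(a, b, mask);   /* takes b where a < b        */
+/// \endcode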
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1, + __m128 __V2, + __m128 __M) { + return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2, + (__v4sf)__M); +} + +/// Returns a 128-bit vector of [16 x i8] where the values are selected +/// from either of the first or second operand as specified by the third +/// operand, the control mask. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPBLENDVB / PBLENDVB instruction. +/// +/// \param __V1 +/// A 128-bit vector of [16 x i8]. +/// \param __V2 +/// A 128-bit vector of [16 x i8]. +/// \param __M +/// A 128-bit vector operand, with mask bits 127, 119, 111...7 specifying +/// how the values are to be copied. The position of the mask bit corresponds +/// to the most significant bit of a copied value. When a mask bit is 0, the +/// corresponding 8-bit element in operand \a __V1 is copied to the same +/// position in the result. When a mask bit is 1, the corresponding 8-bit +/// element in operand \a __V2 is copied to the same position in the result. +/// \returns A 128-bit vector of [16 x i8] containing the copied values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1, + __m128i __V2, + __m128i __M) { + return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2, + (__v16qi)__M); +} + +/// Returns a 128-bit vector of [8 x i16] where the values are selected +/// from either of the first or second operand as specified by the third +/// operand, the control mask. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_blend_epi16(__m128i V1, __m128i V2, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPBLENDW / PBLENDW instruction. +/// +/// \param V1 +/// A 128-bit vector of [8 x i16]. +/// \param V2 +/// A 128-bit vector of [8 x i16]. +/// \param M +/// An immediate integer operand, with mask bits [7:0] specifying how the +/// values are to be copied. The position of the mask bit corresponds to the +/// index of a copied value. When a mask bit is 0, the corresponding 16-bit +/// element in operand \a V1 is copied to the same position in the result. +/// When a mask bit is 1, the corresponding 16-bit element in operand \a V2 +/// is copied to the same position in the result. +/// \returns A 128-bit vector of [8 x i16] containing the copied values. +#define _mm_blend_epi16(V1, V2, M) \ + ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(V1), \ + (__v8hi)(__m128i)(V2), (int)(M))) + +/* SSE4 Dword Multiply Instructions. */ +/// Multiples corresponding elements of two 128-bit vectors of [4 x i32] +/// and returns the lower 32 bits of the each product in a 128-bit vector of +/// [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULLD / PMULLD instruction. +/// +/// \param __V1 +/// A 128-bit integer vector. +/// \param __V2 +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the products of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1, + __m128i __V2) { + return (__m128i)((__v4su)__V1 * (__v4su)__V2); +} + +/// Multiplies corresponding even-indexed elements of two 128-bit +/// vectors of [4 x i32] and returns a 128-bit vector of [2 x i64] +/// containing the products. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMULDQ / PMULDQ instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x i32]. +/// \param __V2 +/// A 128-bit vector of [4 x i32]. 
+/// \returns A 128-bit vector of [2 x i64] containing the products of both +/// operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1, + __m128i __V2) { + return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2); +} + +/* SSE4 Floating Point Dot Product Instructions. */ +/// Computes the dot product of the two 128-bit vectors of [4 x float] +/// and returns it in the elements of the 128-bit result vector of +/// [4 x float]. +/// +/// The immediate integer operand controls which input elements +/// will contribute to the dot product, and where the final results are +/// returned. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_dp_ps(__m128 X, __m128 Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VDPPS / DPPS instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. +/// \param Y +/// A 128-bit vector of [4 x float]. +/// \param M +/// An immediate integer operand. Mask bits [7:4] determine which elements +/// of the input vectors are used, with bit [4] corresponding to the lowest +/// element and bit [7] corresponding to the highest element of each [4 x +/// float] vector. If a bit is set, the corresponding elements from the two +/// input vectors are used as an input for dot product; otherwise that input +/// is treated as zero. Bits [3:0] determine which elements of the result +/// will receive a copy of the final dot product, with bit [0] corresponding +/// to the lowest element and bit [3] corresponding to the highest element of +/// each [4 x float] subvector. If a bit is set, the dot product is returned +/// in the corresponding element; otherwise that element is set to zero. +/// \returns A 128-bit vector of [4 x float] containing the dot product. +#define _mm_dp_ps(X, Y, M) \ + ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M))) + +/// Computes the dot product of the two 128-bit vectors of [2 x double] +/// and returns it in the elements of the 128-bit result vector of +/// [2 x double]. +/// +/// The immediate integer operand controls which input +/// elements will contribute to the dot product, and where the final results +/// are returned. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_dp_pd(__m128d X, __m128d Y, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VDPPD / DPPD instruction. +/// +/// \param X +/// A 128-bit vector of [2 x double]. +/// \param Y +/// A 128-bit vector of [2 x double]. +/// \param M +/// An immediate integer operand. Mask bits [5:4] determine which elements +/// of the input vectors are used, with bit [4] corresponding to the lowest +/// element and bit [5] corresponding to the highest element of each of [2 x +/// double] vector. If a bit is set, the corresponding elements from the two +/// input vectors are used as an input for dot product; otherwise that input +/// is treated as zero. Bits [1:0] determine which elements of the result +/// will receive a copy of the final dot product, with bit [0] corresponding +/// to the lowest element and bit [1] corresponding to the highest element of +/// each [2 x double] vector. If a bit is set, the dot product is returned in +/// the corresponding element; otherwise that element is set to zero. +#define _mm_dp_pd(X, Y, M) \ + ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \ + (M))) + +/* SSE4 Streaming Load Hint Instruction. */ +/// Loads integer values from a 128-bit aligned memory location to a +/// 128-bit integer vector. 
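+///
+/// A minimal usage sketch (illustrative only; \c src and \c dst are
+/// hypothetical 16-byte-aligned pointers):
+///
+/// \code
+/// __m128i v = _mm_stream_load_si128(src);   /* non-temporal load hint  */
+/// _mm_stream_si128(dst, v);                 /* non-temporal store hint */
+/// \endcode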
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTDQA / MOVNTDQA instruction. +/// +/// \param __V +/// A pointer to a 128-bit aligned memory location that contains the integer +/// values. +/// \returns A 128-bit integer vector containing the data stored at the +/// specified memory location. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_stream_load_si128(const void *__V) { + return (__m128i)__builtin_nontemporal_load((const __v2di *)__V); +} + +/* SSE4 Packed Integer Min/Max Instructions. */ +/// Compares the corresponding elements of two 128-bit vectors of +/// [16 x i8] and returns a 128-bit vector of [16 x i8] containing the lesser +/// of the two values. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINSB / PMINSB instruction. +/// +/// \param __V1 +/// A 128-bit vector of [16 x i8]. +/// \param __V2 +/// A 128-bit vector of [16 x i8] +/// \returns A 128-bit vector of [16 x i8] containing the lesser values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1, + __m128i __V2) { + return (__m128i)__builtin_elementwise_min((__v16qs)__V1, (__v16qs)__V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [16 x i8] and returns a 128-bit vector of [16 x i8] containing the +/// greater value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXSB / PMAXSB instruction. +/// +/// \param __V1 +/// A 128-bit vector of [16 x i8]. +/// \param __V2 +/// A 128-bit vector of [16 x i8]. +/// \returns A 128-bit vector of [16 x i8] containing the greater values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1, + __m128i __V2) { + return (__m128i)__builtin_elementwise_max((__v16qs)__V1, (__v16qs)__V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [8 x u16] and returns a 128-bit vector of [8 x u16] containing the lesser +/// value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINUW / PMINUW instruction. +/// +/// \param __V1 +/// A 128-bit vector of [8 x u16]. +/// \param __V2 +/// A 128-bit vector of [8 x u16]. +/// \returns A 128-bit vector of [8 x u16] containing the lesser values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1, + __m128i __V2) { + return (__m128i)__builtin_elementwise_min((__v8hu)__V1, (__v8hu)__V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [8 x u16] and returns a 128-bit vector of [8 x u16] containing the +/// greater value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXUW / PMAXUW instruction. +/// +/// \param __V1 +/// A 128-bit vector of [8 x u16]. +/// \param __V2 +/// A 128-bit vector of [8 x u16]. +/// \returns A 128-bit vector of [8 x u16] containing the greater values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1, + __m128i __V2) { + return (__m128i)__builtin_elementwise_max((__v8hu)__V1, (__v8hu)__V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [4 x i32] and returns a 128-bit vector of [4 x i32] containing the lesser +/// value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINSD / PMINSD instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x i32]. +/// \param __V2 +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit vector of [4 x i32] containing the lesser values. 
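+///
+/// A minimal usage sketch (illustrative only; \c v, \c lo and \c hi are
+/// hypothetical [4 x i32] vectors): clamp each lane of \c v into [lo, hi].
+///
+/// \code
+/// __m128i clamped = _mm_min_epi32(_mm_max_epi32(v, lo), hi);
+/// \endcode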
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1, + __m128i __V2) { + return (__m128i)__builtin_elementwise_min((__v4si)__V1, (__v4si)__V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [4 x i32] and returns a 128-bit vector of [4 x i32] containing the +/// greater value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXSD / PMAXSD instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x i32]. +/// \param __V2 +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit vector of [4 x i32] containing the greater values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1, + __m128i __V2) { + return (__m128i)__builtin_elementwise_max((__v4si)__V1, (__v4si)__V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [4 x u32] and returns a 128-bit vector of [4 x u32] containing the lesser +/// value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMINUD / PMINUD instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x u32]. +/// \param __V2 +/// A 128-bit vector of [4 x u32]. +/// \returns A 128-bit vector of [4 x u32] containing the lesser values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1, + __m128i __V2) { + return (__m128i)__builtin_elementwise_min((__v4su)__V1, (__v4su)__V2); +} + +/// Compares the corresponding elements of two 128-bit vectors of +/// [4 x u32] and returns a 128-bit vector of [4 x u32] containing the +/// greater value of the two. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMAXUD / PMAXUD instruction. +/// +/// \param __V1 +/// A 128-bit vector of [4 x u32]. +/// \param __V2 +/// A 128-bit vector of [4 x u32]. +/// \returns A 128-bit vector of [4 x u32] containing the greater values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1, + __m128i __V2) { + return (__m128i)__builtin_elementwise_max((__v4su)__V1, (__v4su)__V2); +} + +/* SSE4 Insertion and Extraction from XMM Register Instructions. */ +/// Takes the first argument \a X and inserts an element from the second +/// argument \a Y as selected by the third argument \a N. That result then +/// has elements zeroed out also as selected by the third argument \a N. The +/// resulting 128-bit vector of [4 x float] is then returned. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_insert_ps(__m128 X, __m128 Y, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VINSERTPS instruction. +/// +/// \param X +/// A 128-bit vector source operand of [4 x float]. With the exception of +/// those bits in the result copied from parameter \a Y and zeroed by bits +/// [3:0] of \a N, all bits from this parameter are copied to the result. +/// \param Y +/// A 128-bit vector source operand of [4 x float]. One single-precision +/// floating-point element from this source, as determined by the immediate +/// parameter, is copied to the result. +/// \param N +/// Specifies which bits from operand \a Y will be copied, which bits in the +/// result they will be copied to, and which bits in the result will be +/// cleared. The following assignments are made: \n +/// Bits [7:6] specify the bits to copy from operand \a Y: \n +/// 00: Selects bits [31:0] from operand \a Y. \n +/// 01: Selects bits [63:32] from operand \a Y. \n +/// 10: Selects bits [95:64] from operand \a Y. \n +/// 11: Selects bits [127:96] from operand \a Y. 
\n +/// Bits [5:4] specify the bits in the result to which the selected bits +/// from operand \a Y are copied: \n +/// 00: Copies the selected bits from \a Y to result bits [31:0]. \n +/// 01: Copies the selected bits from \a Y to result bits [63:32]. \n +/// 10: Copies the selected bits from \a Y to result bits [95:64]. \n +/// 11: Copies the selected bits from \a Y to result bits [127:96]. \n +/// Bits[3:0]: If any of these bits are set, the corresponding result +/// element is cleared. +/// \returns A 128-bit vector of [4 x float] containing the copied +/// single-precision floating point elements from the operands. +#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N)) + +/// Extracts a 32-bit integer from a 128-bit vector of [4 x float] and +/// returns it, using the immediate value parameter \a N as a selector. +/// +/// \headerfile +/// +/// \code +/// int _mm_extract_ps(__m128 X, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VEXTRACTPS / EXTRACTPS +/// instruction. +/// +/// \param X +/// A 128-bit vector of [4 x float]. +/// \param N +/// An immediate value. Bits [1:0] determines which bits from the argument +/// \a X are extracted and returned: \n +/// 00: Bits [31:0] of parameter \a X are returned. \n +/// 01: Bits [63:32] of parameter \a X are returned. \n +/// 10: Bits [95:64] of parameter \a X are returned. \n +/// 11: Bits [127:96] of parameter \a X are returned. +/// \returns A 32-bit integer containing the extracted 32 bits of float data. +#define _mm_extract_ps(X, N) \ + __builtin_bit_cast( \ + int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N))) + +/* Miscellaneous insert and extract macros. */ +/* Extract a single-precision float from X at index N into D. */ +#define _MM_EXTRACT_FLOAT(D, X, N) \ + do { \ + (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \ + } while (0) + +/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create + an index suitable for _mm_insert_ps. */ +#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z)) + +/* Extract a float from X at index N into the first index of the return. */ +#define _MM_PICK_OUT_PS(X, N) \ + _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e)) + +/* Insert int into packed integer array at index. */ +/// Constructs a 128-bit vector of [16 x i8] by first making a copy of +/// the 128-bit integer vector parameter, and then inserting the lower 8 bits +/// of an integer parameter \a I into an offset specified by the immediate +/// value parameter \a N. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_insert_epi8(__m128i X, int I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VPINSRB / PINSRB instruction. +/// +/// \param X +/// A 128-bit integer vector of [16 x i8]. This vector is copied to the +/// result and then one of the sixteen elements in the result vector is +/// replaced by the lower 8 bits of \a I. +/// \param I +/// An integer. The lower 8 bits of this operand are written to the result +/// beginning at the offset specified by \a N. +/// \param N +/// An immediate value. Bits [3:0] specify the bit offset in the result at +/// which the lower 8 bits of \a I are written. \n +/// 0000: Bits [7:0] of the result are used for insertion. \n +/// 0001: Bits [15:8] of the result are used for insertion. \n +/// 0010: Bits [23:16] of the result are used for insertion. \n +/// 0011: Bits [31:24] of the result are used for insertion. 
\n +/// 0100: Bits [39:32] of the result are used for insertion. \n +/// 0101: Bits [47:40] of the result are used for insertion. \n +/// 0110: Bits [55:48] of the result are used for insertion. \n +/// 0111: Bits [63:56] of the result are used for insertion. \n +/// 1000: Bits [71:64] of the result are used for insertion. \n +/// 1001: Bits [79:72] of the result are used for insertion. \n +/// 1010: Bits [87:80] of the result are used for insertion. \n +/// 1011: Bits [95:88] of the result are used for insertion. \n +/// 1100: Bits [103:96] of the result are used for insertion. \n +/// 1101: Bits [111:104] of the result are used for insertion. \n +/// 1110: Bits [119:112] of the result are used for insertion. \n +/// 1111: Bits [127:120] of the result are used for insertion. +/// \returns A 128-bit integer vector containing the constructed values. +#define _mm_insert_epi8(X, I, N) \ + ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), (int)(I), \ + (int)(N))) + +/// Constructs a 128-bit vector of [4 x i32] by first making a copy of +/// the 128-bit integer vector parameter, and then inserting the 32-bit +/// integer parameter \a I at the offset specified by the immediate value +/// parameter \a N. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_insert_epi32(__m128i X, int I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VPINSRD / PINSRD instruction. +/// +/// \param X +/// A 128-bit integer vector of [4 x i32]. This vector is copied to the +/// result and then one of the four elements in the result vector is +/// replaced by \a I. +/// \param I +/// A 32-bit integer that is written to the result beginning at the offset +/// specified by \a N. +/// \param N +/// An immediate value. Bits [1:0] specify the bit offset in the result at +/// which the integer \a I is written. \n +/// 00: Bits [31:0] of the result are used for insertion. \n +/// 01: Bits [63:32] of the result are used for insertion. \n +/// 10: Bits [95:64] of the result are used for insertion. \n +/// 11: Bits [127:96] of the result are used for insertion. +/// \returns A 128-bit integer vector containing the constructed values. +#define _mm_insert_epi32(X, I, N) \ + ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), (int)(I), \ + (int)(N))) + +#ifdef __x86_64__ +/// Constructs a 128-bit vector of [2 x i64] by first making a copy of +/// the 128-bit integer vector parameter, and then inserting the 64-bit +/// integer parameter \a I, using the immediate value parameter \a N as an +/// insertion location selector. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_insert_epi64(__m128i X, long long I, const int N); +/// \endcode +/// +/// This intrinsic corresponds to the VPINSRQ / PINSRQ instruction. +/// +/// \param X +/// A 128-bit integer vector of [2 x i64]. This vector is copied to the +/// result and then one of the two elements in the result vector is replaced +/// by \a I. +/// \param I +/// A 64-bit integer that is written to the result beginning at the offset +/// specified by \a N. +/// \param N +/// An immediate value. Bit [0] specifies the bit offset in the result at +/// which the integer \a I is written. \n +/// 0: Bits [63:0] of the result are used for insertion. \n +/// 1: Bits [127:64] of the result are used for insertion. \n +/// \returns A 128-bit integer vector containing the constructed values. 
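+/// As an illustration (not part of the upstream documentation), a
+/// hypothetical helper that replaces only the upper 64-bit lane, assuming
+/// SSE4.1 is enabled on an x86_64 target:
+/// \code
+/// static inline __m128i set_high_lane(__m128i v, long long x) {
+///   return _mm_insert_epi64(v, x, 1); // N = 1 selects bits [127:64]
+/// }
+/// \endcode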
+#define _mm_insert_epi64(X, I, N)                                             \
+  ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), (long long)(I), \
+                                        (int)(N)))
+#endif /* __x86_64__ */
+
+/* Extract int from packed integer array at index. This returns the element
+ * as a zero extended value, so it is unsigned.
+ */
+/// Extracts an 8-bit element from the 128-bit integer vector of
+/// [16 x i8], using the immediate value parameter \a N as a selector.
+///
+/// \headerfile
+///
+/// \code
+/// int _mm_extract_epi8(__m128i X, const int N);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPEXTRB / PEXTRB instruction.
+///
+/// \param X
+/// A 128-bit integer vector.
+/// \param N
+/// An immediate value. Bits [3:0] specify which 8-bit vector element from
+/// the argument \a X to extract and copy to the result. \n
+/// 0000: Bits [7:0] of parameter \a X are extracted. \n
+/// 0001: Bits [15:8] of the parameter \a X are extracted. \n
+/// 0010: Bits [23:16] of the parameter \a X are extracted. \n
+/// 0011: Bits [31:24] of the parameter \a X are extracted. \n
+/// 0100: Bits [39:32] of the parameter \a X are extracted. \n
+/// 0101: Bits [47:40] of the parameter \a X are extracted. \n
+/// 0110: Bits [55:48] of the parameter \a X are extracted. \n
+/// 0111: Bits [63:56] of the parameter \a X are extracted. \n
+/// 1000: Bits [71:64] of the parameter \a X are extracted. \n
+/// 1001: Bits [79:72] of the parameter \a X are extracted. \n
+/// 1010: Bits [87:80] of the parameter \a X are extracted. \n
+/// 1011: Bits [95:88] of the parameter \a X are extracted. \n
+/// 1100: Bits [103:96] of the parameter \a X are extracted. \n
+/// 1101: Bits [111:104] of the parameter \a X are extracted. \n
+/// 1110: Bits [119:112] of the parameter \a X are extracted. \n
+/// 1111: Bits [127:120] of the parameter \a X are extracted.
+/// \returns An unsigned integer, whose lower 8 bits are selected from the
+/// 128-bit integer vector parameter and the remaining bits are assigned
+/// zeros.
+#define _mm_extract_epi8(X, N)                                                \
+  ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X),    \
+                                                    (int)(N)))
+
+/// Extracts a 32-bit element from the 128-bit integer vector of
+/// [4 x i32], using the immediate value parameter \a N as a selector.
+///
+/// \headerfile
+///
+/// \code
+/// int _mm_extract_epi32(__m128i X, const int N);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPEXTRD / PEXTRD instruction.
+///
+/// \param X
+/// A 128-bit integer vector.
+/// \param N
+/// An immediate value. Bits [1:0] specify which 32-bit vector element from
+/// the argument \a X to extract and copy to the result. \n
+/// 00: Bits [31:0] of the parameter \a X are extracted. \n
+/// 01: Bits [63:32] of the parameter \a X are extracted. \n
+/// 10: Bits [95:64] of the parameter \a X are extracted. \n
+/// 11: Bits [127:96] of the parameter \a X are extracted.
+/// \returns An integer, whose lower 32 bits are selected from the 128-bit
+/// integer vector parameter and the remaining bits are assigned zeros.
+#define _mm_extract_epi32(X, N)                                               \
+  ((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))
+
+/// Extracts a 64-bit element from the 128-bit integer vector of
+/// [2 x i64], using the immediate value parameter \a N as a selector.
+///
+/// \headerfile
+///
+/// \code
+/// long long _mm_extract_epi64(__m128i X, const int N);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPEXTRQ / PEXTRQ instruction
+/// in 64-bit mode.
+///
+/// \param X
+/// A 128-bit integer vector.
+/// \param N +/// An immediate value. Bit [0] specifies which 64-bit vector element from +/// the argument \a X to return. \n +/// 0: Bits [63:0] are returned. \n +/// 1: Bits [127:64] are returned. \n +/// \returns A 64-bit integer. +#define _mm_extract_epi64(X, N) \ + ((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N))) + +/* SSE4 128-bit Packed Integer Comparisons. */ +/// Tests whether the specified bits in a 128-bit integer vector are all +/// zeros. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param __M +/// A 128-bit integer vector containing the bits to be tested. +/// \param __V +/// A 128-bit integer vector selecting which bits to test in operand \a __M. +/// \returns TRUE if the specified bits are all zeros; FALSE otherwise. +static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M, + __m128i __V) { + return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V); +} + +/// Tests whether the specified bits in a 128-bit integer vector are all +/// ones. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param __M +/// A 128-bit integer vector containing the bits to be tested. +/// \param __V +/// A 128-bit integer vector selecting which bits to test in operand \a __M. +/// \returns TRUE if the specified bits are all ones; FALSE otherwise. +static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M, + __m128i __V) { + return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V); +} + +/// Tests whether the specified bits in a 128-bit integer vector are +/// neither all zeros nor all ones. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param __M +/// A 128-bit integer vector containing the bits to be tested. +/// \param __V +/// A 128-bit integer vector selecting which bits to test in operand \a __M. +/// \returns TRUE if the specified bits are neither all zeros nor all ones; +/// FALSE otherwise. +static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M, + __m128i __V) { + return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V); +} + +/// Tests whether the specified bits in a 128-bit integer vector are all +/// ones. +/// +/// \headerfile +/// +/// \code +/// int _mm_test_all_ones(__m128i V); +/// \endcode +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param V +/// A 128-bit integer vector containing the bits to be tested. +/// \returns TRUE if the bits specified in the operand are all set to 1; FALSE +/// otherwise. +#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_set1_epi32(-1)) + +/// Tests whether the specified bits in a 128-bit integer vector are +/// neither all zeros nor all ones. +/// +/// \headerfile +/// +/// \code +/// int _mm_test_mix_ones_zeros(__m128i M, __m128i V); +/// \endcode +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param M +/// A 128-bit integer vector containing the bits to be tested. +/// \param V +/// A 128-bit integer vector selecting which bits to test in operand \a M. +/// \returns TRUE if the specified bits are neither all zeros nor all ones; +/// FALSE otherwise. +#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V)) + +/// Tests whether the specified bits in a 128-bit integer vector are all +/// zeros. 
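+/// A hypothetical usage sketch (not part of the upstream documentation),
+/// assuming SSE4.1 is enabled: passing the same vector as both mask and
+/// value tests whether the whole vector is zero.
+/// \code
+/// static inline int vector_is_zero(__m128i v) {
+///   return _mm_test_all_zeros(v, v); // (v & v) == 0 iff every bit is clear
+/// }
+/// \endcode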
+/// +/// \headerfile +/// +/// \code +/// int _mm_test_all_zeros(__m128i M, __m128i V); +/// \endcode +/// +/// This intrinsic corresponds to the VPTEST / PTEST instruction. +/// +/// \param M +/// A 128-bit integer vector containing the bits to be tested. +/// \param V +/// A 128-bit integer vector selecting which bits to test in operand \a M. +/// \returns TRUE if the specified bits are all zeros; FALSE otherwise. +#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V)) + +/* SSE4 64-bit Packed Integer Comparisons. */ +/// Compares each of the corresponding 64-bit values of the 128-bit +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPEQQ / PCMPEQQ instruction. +/// +/// \param __V1 +/// A 128-bit integer vector. +/// \param __V2 +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1, + __m128i __V2) { + return (__m128i)((__v2di)__V1 == (__v2di)__V2); +} + +/* SSE4 Packed Integer Sign-Extension. */ +/// Sign-extends each of the lower eight 8-bit integer elements of a +/// 128-bit vector of [16 x i8] to 16-bit values and returns them in a +/// 128-bit vector of [8 x i16]. The upper eight elements of the input vector +/// are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXBW / PMOVSXBW instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are +/// sign-extended to 16-bit values. +/// \returns A 128-bit vector of [8 x i16] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) { + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, + 7), + __v8hi); +} + +/// Sign-extends each of the lower four 8-bit integer elements of a +/// 128-bit vector of [16 x i8] to 32-bit values and returns them in a +/// 128-bit vector of [4 x i32]. The upper twelve elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXBD / PMOVSXBD instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are +/// sign-extended to 32-bit values. +/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) { + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si); +} + +/// Sign-extends each of the lower two 8-bit integer elements of a +/// 128-bit integer vector of [16 x i8] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper fourteen elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXBQ / PMOVSXBQ instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are +/// sign-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values. 
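+/// A hypothetical usage sketch (not part of the upstream documentation),
+/// assuming only the low two bytes of the argument carry meaningful data:
+/// \code
+/// static inline __m128i widen_low2_i8_to_i64(__m128i v) {
+///   return _mm_cvtepi8_epi64(v); // sign-extends bytes 0 and 1 to 64 bits
+/// }
+/// \endcode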
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) { + /* This function always performs a signed extension, but __v16qi is a char + which may be signed or unsigned, so use __v16qs. */ + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di); +} + +/// Sign-extends each of the lower four 16-bit integer elements of a +/// 128-bit integer vector of [8 x i16] to 32-bit values and returns them in +/// a 128-bit vector of [4 x i32]. The upper four elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXWD / PMOVSXWD instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are +/// sign-extended to 32-bit values. +/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) { + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si); +} + +/// Sign-extends each of the lower two 16-bit integer elements of a +/// 128-bit integer vector of [8 x i16] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper six elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXWQ / PMOVSXWQ instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are +/// sign-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) { + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di); +} + +/// Sign-extends each of the lower two 32-bit integer elements of a +/// 128-bit integer vector of [4 x i32] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper two elements of the input vector +/// are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVSXDQ / PMOVSXDQ instruction. +/// +/// \param __V +/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are +/// sign-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) { + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di); +} + +/* SSE4 Packed Integer Zero-Extension. */ +/// Zero-extends each of the lower eight 8-bit integer elements of a +/// 128-bit vector of [16 x i8] to 16-bit values and returns them in a +/// 128-bit vector of [8 x i16]. The upper eight elements of the input vector +/// are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXBW / PMOVZXBW instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are +/// zero-extended to 16-bit values. +/// \returns A 128-bit vector of [8 x i16] containing the zero-extended values. 
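+/// A hypothetical usage sketch (not part of the upstream documentation):
+/// widening pixel bytes to 16-bit lanes before arithmetic that could
+/// overflow 8 bits, assuming SSE4.1 is enabled:
+/// \code
+/// static inline __m128i widen_low8_pixels(__m128i pixels) {
+///   return _mm_cvtepu8_epi16(pixels); // zero-extends bytes 0..7 to 16 bits
+/// }
+/// \endcode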
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) { + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, + 7), + __v8hi); +} + +/// Zero-extends each of the lower four 8-bit integer elements of a +/// 128-bit vector of [16 x i8] to 32-bit values and returns them in a +/// 128-bit vector of [4 x i32]. The upper twelve elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXBD / PMOVZXBD instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are +/// zero-extended to 32-bit values. +/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) { + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si); +} + +/// Zero-extends each of the lower two 8-bit integer elements of a +/// 128-bit integer vector of [16 x i8] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper fourteen elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXBQ / PMOVZXBQ instruction. +/// +/// \param __V +/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are +/// zero-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) { + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di); +} + +/// Zero-extends each of the lower four 16-bit integer elements of a +/// 128-bit integer vector of [8 x i16] to 32-bit values and returns them in +/// a 128-bit vector of [4 x i32]. The upper four elements of the input +/// vector are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXWD / PMOVZXWD instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are +/// zero-extended to 32-bit values. +/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) { + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si); +} + +/// Zero-extends each of the lower two 16-bit integer elements of a +/// 128-bit integer vector of [8 x i16] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper six elements of the input vector +/// are unused. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPMOVZXWQ / PMOVZXWQ instruction. +/// +/// \param __V +/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are +/// zero-extended to 64-bit values. +/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) { + return (__m128i) __builtin_convertvector( + __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di); +} + +/// Zero-extends each of the lower two 32-bit integer elements of a +/// 128-bit integer vector of [4 x i32] to 64-bit values and returns them in +/// a 128-bit vector of [2 x i64]. The upper two elements of the input vector +/// are unused. 
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPMOVZXDQ / PMOVZXDQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
+/// zero-extended to 64-bit values.
+/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
+  return (__m128i) __builtin_convertvector(
+      __builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
+}
+
+/* SSE4 Pack with Unsigned Saturation. */
+/// Converts, with saturation, 32-bit signed integers from both 128-bit integer
+/// vector operands into 16-bit unsigned integers, and returns the packed
+/// result.
+///
+/// Values greater than 0xFFFF are saturated to 0xFFFF. Values less than
+/// 0x0000 are saturated to 0x0000.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPACKUSDW / PACKUSDW instruction.
+///
+/// \param __V1
+/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are
+/// written to the lower 64 bits of the result.
+/// \param __V2
+/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are
+/// written to the higher 64 bits of the result.
+/// \returns A 128-bit vector of [8 x i16] containing the converted values.
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
+                                                              __m128i __V2) {
+  return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+}
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+/// Subtracts 8-bit unsigned integer values and computes the absolute
+/// values of the differences to the corresponding bits in the destination.
+/// Then sums of the absolute differences are returned according to the bit
+/// fields in the immediate operand.
+///
+/// \headerfile
+///
+/// \code
+/// __m128i _mm_mpsadbw_epu8(__m128i X, __m128i Y, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the VMPSADBW / MPSADBW instruction.
+///
+/// \param X
+/// A 128-bit vector of [16 x i8].
+/// \param Y
+/// A 128-bit vector of [16 x i8].
+/// \param M
+/// An 8-bit immediate operand specifying how the absolute differences are to
+/// be calculated, according to the following algorithm:
+/// \code
+/// // M2 represents bit 2 of the immediate operand
+/// // M10 represents bits [1:0] of the immediate operand
+/// i = M2 * 4;
+/// j = M10 * 4;
+/// for (k = 0; k < 8; k = k + 1) {
+///   d0 = abs(X[i + k + 0] - Y[j + 0]);
+///   d1 = abs(X[i + k + 1] - Y[j + 1]);
+///   d2 = abs(X[i + k + 2] - Y[j + 2]);
+///   d3 = abs(X[i + k + 3] - Y[j + 3]);
+///   r[k] = d0 + d1 + d2 + d3;
+/// }
+/// \endcode
+/// \returns A 128-bit integer vector containing the sums of the sets of
+/// absolute differences between both operands.
+#define _mm_mpsadbw_epu8(X, Y, M)                                             \
+  ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X),                  \
+                                      (__v16qi)(__m128i)(Y), (M)))
+
+/// Finds the minimum unsigned 16-bit element in the input 128-bit
+/// vector of [8 x u16] and returns it, along with its index.
+///
+/// \headerfile
+///
+/// This intrinsic corresponds to the VPHMINPOSUW / PHMINPOSUW
+/// instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x u16].
+/// \returns A 128-bit value where bits [15:0] contain the minimum value found
+/// in parameter \a __V, bits [18:16] contain the index of the minimum value,
+/// and the remaining bits are set to 0.
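+/// A hypothetical usage sketch (not part of the upstream documentation),
+/// assuming SSE2's _mm_extract_epi16 is available to unpack the result:
+/// \code
+/// static inline unsigned min_u16_and_index(__m128i v, unsigned *index) {
+///   __m128i r = _mm_minpos_epu16(v);
+///   *index = (unsigned)_mm_extract_epi16(r, 1) & 0x7; // bits [18:16]
+///   return (unsigned)_mm_extract_epi16(r, 0);         // bits [15:0]
+/// }
+/// \endcode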
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
+  return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
+}
+
+/* Handle the sse4.2 definitions here. */
+
+/* These definitions are normally in nmmintrin.h, but gcc puts them in here
+   so we'll do the same. */
+
+#undef __DEFAULT_FN_ATTRS
+#define __DEFAULT_FN_ATTRS                                                    \
+  __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+
+/* These specify the type of data that we're comparing. */
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
+
+/* These specify the type of comparison operation. */
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
+
+/* These macros specify the polarity of the operation. */
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
+
+/* These macros are used in _mm_cmpXstri() to specify the return. */
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
+
+/* These macros are used in _mm_cmpXstrm() to specify the return. */
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
+
+/* SSE4.2 Packed Comparison Intrinsics. */
+/// Uses the immediate operand \a M to perform a comparison of string
+/// data with implicitly defined lengths that is contained in source operands
+/// \a A and \a B. Returns a 128-bit integer vector representing the result
+/// mask of the comparison.
+///
+/// \headerfile
+///
+/// \code
+/// __m128i _mm_cmpistrm(__m128i A, __m128i B, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the VPCMPISTRM / PCMPISTRM
+/// instruction.
+///
+/// \param A
+/// A 128-bit integer vector containing one of the source operands to be
+/// compared.
+/// \param B
+/// A 128-bit integer vector containing one of the source operands to be
+/// compared.
+/// \param M
+/// An 8-bit immediate operand specifying whether the characters are bytes or
+/// words, the type of comparison to perform, and the format of the return
+/// value. \n
+/// Bits [1:0]: Determine source data format. \n
+/// 00: 16 unsigned bytes \n
+/// 01: 8 unsigned words \n
+/// 10: 16 signed bytes \n
+/// 11: 8 signed words \n
+/// Bits [3:2]: Determine comparison type and aggregation method. \n
+/// 00: Subset: Each character in \a B is compared for equality with all
+/// the characters in \a A. \n
+/// 01: Ranges: Each character in \a B is compared to \a A. The comparison
+/// basis is greater than or equal for even-indexed elements in \a A,
+/// and less than or equal for odd-indexed elements in \a A. \n
+/// 10: Match: Compare each pair of corresponding characters in \a A and
+/// \a B for equality. \n
+/// 11: Substring: Search \a B for substring matches of \a A. \n
+/// Bits [5:4]: Determine whether to perform a one's complement on the bit
+/// mask of the comparison results. \n
+/// 00: No effect. \n
+/// 01: Negate the bit mask. \n
+/// 10: No effect. \n
+/// 11: Negate the bit mask only for bits with an index less than or equal
+/// to the size of \a A or \a B. \n
+/// Bit [6]: Determines whether the result is zero-extended or expanded to 16
+/// bytes. \n
+/// 0: The result is zero-extended to 16 bytes. \n
+/// 1: The result is expanded to 16 bytes (this expansion is performed by
+/// repeating each bit 8 or 16 times).
+/// \returns Returns a 128-bit integer vector representing the result mask of +/// the comparison. +#define _mm_cmpistrm(A, B, M) \ + ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns an integer representing the result index of the +/// comparison. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistri(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words, the type of comparison to perform, and the format of the return +/// value. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// Bit [6]: Determines whether the index of the lowest set bit or the +/// highest set bit is returned. \n +/// 0: The index of the least significant set bit. \n +/// 1: The index of the most significant set bit. \n +/// \returns Returns an integer representing the result index of the comparison. +#define _mm_cmpistri(A, B, M) \ + ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns a 128-bit integer vector representing the result +/// mask of the comparison. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_cmpestrm(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRM / PCMPESTRM +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words, the type of comparison to perform, and the format of the return +/// value. 
\n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// Bit [6]: Determines whether the result is zero-extended or expanded to 16 +/// bytes. \n +/// 0: The result is zero-extended to 16 bytes. \n +/// 1: The result is expanded to 16 bytes (this expansion is performed by +/// repeating each bit 8 or 16 times). \n +/// \returns Returns a 128-bit integer vector representing the result mask of +/// the comparison. +#define _mm_cmpestrm(A, LA, B, LB, M) \ + ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns an integer representing the result index of the +/// comparison. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestri(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words, the type of comparison to perform, and the format of the return +/// value. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. 
\n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// Bit [6]: Determines whether the index of the lowest set bit or the +/// highest set bit is returned. \n +/// 0: The index of the least significant set bit. \n +/// 1: The index of the most significant set bit. \n +/// \returns Returns an integer representing the result index of the comparison. +#define _mm_cmpestri(A, LA, B, LB, M) \ + ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M))) + +/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */ +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the bit mask is zero and the length of the +/// string in \a B is the maximum, otherwise, returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistra(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// \returns Returns 1 if the bit mask is zero and the length of the string in +/// \a B is the maximum; otherwise, returns 0. +#define _mm_cmpistra(A, B, M) \ + ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the bit mask is non-zero, otherwise, returns +/// 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistrc(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. 
\n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. +/// \returns Returns 1 if the bit mask is non-zero, otherwise, returns 0. +#define _mm_cmpistrc(A, B, M) \ + ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns bit 0 of the resulting bit mask. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistro(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// \returns Returns bit 0 of the resulting bit mask. +#define _mm_cmpistro(A, B, M) \ + ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the length of the string in \a A is less than +/// the maximum, otherwise, returns 0. 
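+/// A hypothetical usage sketch (not part of the upstream documentation):
+/// because this flag depends only on operand \a A, passing the same chunk
+/// twice reports whether it contains a terminating NUL byte:
+/// \code
+/// static inline int chunk_has_nul(__m128i chunk) {
+///   return _mm_cmpistrs(chunk, chunk,
+///                       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH);
+/// }
+/// \endcode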
+/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistrs(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// \returns Returns 1 if the length of the string in \a A is less than the +/// maximum, otherwise, returns 0. +#define _mm_cmpistrs(A, B, M) \ + ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with implicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the length of the string in \a B is less than +/// the maximum, otherwise, returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpistrz(__m128i A, __m128i B, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPISTRI / PCMPISTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. 
\n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. +/// \returns Returns 1 if the length of the string in \a B is less than the +/// maximum, otherwise, returns 0. +#define _mm_cmpistrz(A, B, M) \ + ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the bit mask is zero and the length of the +/// string in \a B is the maximum, otherwise, returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestra(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. +/// \returns Returns 1 if the bit mask is zero and the length of the string in +/// \a B is the maximum, otherwise, returns 0. +#define _mm_cmpestra(A, LA, B, LB, M) \ + ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the resulting mask is non-zero, otherwise, +/// returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestrc(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. 
+/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// \returns Returns 1 if the resulting mask is non-zero, otherwise, returns 0. +#define _mm_cmpestrc(A, LA, B, LB, M) \ + ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns bit 0 of the resulting bit mask. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestro(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. +/// \returns Returns bit 0 of the resulting bit mask. 
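+/// A hypothetical usage sketch (not part of the upstream documentation):
+/// in equal-any mode with positive polarity, bit 0 of the mask is set when
+/// the first valid byte of \a B matches any of the \a LA bytes in \a A.
+/// \code
+/// static inline int first_byte_in_set(__m128i set, int set_len,
+///                                     __m128i text, int text_len) {
+///   return _mm_cmpestro(set, set_len, text, text_len,
+///                       _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
+/// }
+/// \endcode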
+#define _mm_cmpestro(A, LA, B, LB, M) \ + ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the length of the string in \a A is less than +/// the maximum, otherwise, returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestrs(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI / PCMPESTRI +/// instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. \n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement in the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. \n +/// \returns Returns 1 if the length of the string in \a A is less than the +/// maximum, otherwise, returns 0. +#define _mm_cmpestrs(A, LA, B, LB, M) \ + ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M))) + +/// Uses the immediate operand \a M to perform a comparison of string +/// data with explicitly defined lengths that is contained in source operands +/// \a A and \a B. Returns 1 if the length of the string in \a B is less than +/// the maximum, otherwise, returns 0. +/// +/// \headerfile +/// +/// \code +/// int _mm_cmpestrz(__m128i A, int LA, __m128i B, int LB, const int M); +/// \endcode +/// +/// This intrinsic corresponds to the VPCMPESTRI instruction. +/// +/// \param A +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LA +/// An integer that specifies the length of the string in \a A. +/// \param B +/// A 128-bit integer vector containing one of the source operands to be +/// compared. +/// \param LB +/// An integer that specifies the length of the string in \a B. +/// \param M +/// An 8-bit immediate operand specifying whether the characters are bytes or +/// words and the type of comparison to perform. \n +/// Bits [1:0]: Determine source data format. 
\n +/// 00: 16 unsigned bytes \n +/// 01: 8 unsigned words \n +/// 10: 16 signed bytes \n +/// 11: 8 signed words \n +/// Bits [3:2]: Determine comparison type and aggregation method. \n +/// 00: Subset: Each character in \a B is compared for equality with all +/// the characters in \a A. \n +/// 01: Ranges: Each character in \a B is compared to \a A. The comparison +/// basis is greater than or equal for even-indexed elements in \a A, +/// and less than or equal for odd-indexed elements in \a A. \n +/// 10: Match: Compare each pair of corresponding characters in \a A and +/// \a B for equality. \n +/// 11: Substring: Search \a B for substring matches of \a A. \n +/// Bits [5:4]: Determine whether to perform a one's complement on the bit +/// mask of the comparison results. \n +/// 00: No effect. \n +/// 01: Negate the bit mask. \n +/// 10: No effect. \n +/// 11: Negate the bit mask only for bits with an index less than or equal +/// to the size of \a A or \a B. +/// \returns Returns 1 if the length of the string in \a B is less than the +/// maximum, otherwise, returns 0. +#define _mm_cmpestrz(A, LA, B, LB, M) \ + ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \ + (__v16qi)(__m128i)(B), (int)(LB), \ + (int)(M))) + +/* SSE4.2 Compare Packed Data -- Greater Than. */ +/// Compares each of the corresponding 64-bit values of the 128-bit +/// integer vectors to determine if the values in the first operand are +/// greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPCMPGTQ / PCMPGTQ instruction. +/// +/// \param __V1 +/// A 128-bit integer vector. +/// \param __V2 +/// A 128-bit integer vector. +/// \returns A 128-bit integer vector containing the comparison results. +static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1, + __m128i __V2) { + return (__m128i)((__v2di)__V1 > (__v2di)__V2); +} + +#undef __DEFAULT_FN_ATTRS + +#include "popcntintrin.h" + +#include "crc32intrin.h" + +#endif /* __SMMINTRIN_H */ diff --git a/third_party/intel/clang/tbmintrin.h b/third_party/intel/clang/tbmintrin.h new file mode 100644 index 000000000..f4e848a1c --- /dev/null +++ b/third_party/intel/clang/tbmintrin.h @@ -0,0 +1,140 @@ +/*===---- tbmintrin.h - TBM intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86INTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __TBMINTRIN_H +#define __TBMINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("tbm"))) + +#define __bextri_u32(a, b) \ + ((unsigned int)__builtin_ia32_bextri_u32((unsigned int)(a), \ + (unsigned int)(b))) + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blcfill_u32(unsigned int __a) +{ + return __a & (__a + 1); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blci_u32(unsigned int __a) +{ + return __a | ~(__a + 1); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blcic_u32(unsigned int __a) +{ + return ~__a & (__a + 1); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blcmsk_u32(unsigned int __a) +{ + return __a ^ (__a + 1); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blcs_u32(unsigned int __a) +{ + return __a | (__a + 1); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blsfill_u32(unsigned int __a) +{ + return __a | (__a - 1); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__blsic_u32(unsigned int __a) +{ + return ~__a | (__a - 1); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__t1mskc_u32(unsigned int __a) +{ + return ~__a | (__a + 1); +} + +static __inline__ unsigned int __DEFAULT_FN_ATTRS +__tzmsk_u32(unsigned int __a) +{ + return ~__a & (__a - 1); +} + +#ifdef __x86_64__ +#define __bextri_u64(a, b) \ + ((unsigned long long)__builtin_ia32_bextri_u64((unsigned long long)(a), \ + (unsigned long long)(b))) + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blcfill_u64(unsigned long long __a) +{ + return __a & (__a + 1); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blci_u64(unsigned long long __a) +{ + return __a | ~(__a + 1); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blcic_u64(unsigned long long __a) +{ + return ~__a & (__a + 1); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blcmsk_u64(unsigned long long __a) +{ + return __a ^ (__a + 1); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blcs_u64(unsigned long long __a) +{ + return __a | (__a + 1); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blsfill_u64(unsigned long long __a) +{ + return __a | (__a - 1); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__blsic_u64(unsigned long long __a) +{ + return ~__a | (__a - 1); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__t1mskc_u64(unsigned long long __a) +{ + return ~__a | (__a + 1); +} + +static __inline__ unsigned long long __DEFAULT_FN_ATTRS +__tzmsk_u64(unsigned long long __a) +{ + return ~__a & (__a - 1); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif /* __TBMINTRIN_H */ diff --git a/third_party/intel/clang/tmmintrin.h b/third_party/intel/clang/tmmintrin.h new file mode 100644 index 000000000..1674545c0 --- /dev/null +++ b/third_party/intel/clang/tmmintrin.h @@ -0,0 +1,784 @@ +/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __TMMINTRIN_H +#define __TMMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +#include "pmmintrin.h" + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("ssse3,no-evex512"), __min_vector_width__(64))) +#define __DEFAULT_FN_ATTRS_MMX \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("mmx,ssse3,no-evex512"), \ + __min_vector_width__(64))) + +/// Computes the absolute value of each of the packed 8-bit signed +/// integers in the source operand and stores the 8-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PABSB instruction. +/// +/// \param __a +/// A 64-bit vector of [8 x i8]. +/// \returns A 64-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_abs_pi8(__m64 __a) +{ + return (__m64)__builtin_ia32_pabsb((__v8qi)__a); +} + +/// Computes the absolute value of each of the packed 8-bit signed +/// integers in the source operand and stores the 8-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSB instruction. +/// +/// \param __a +/// A 128-bit vector of [16 x i8]. +/// \returns A 128-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_abs_epi8(__m128i __a) +{ + return (__m128i)__builtin_elementwise_abs((__v16qs)__a); +} + +/// Computes the absolute value of each of the packed 16-bit signed +/// integers in the source operand and stores the 16-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PABSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16]. +/// \returns A 64-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_abs_pi16(__m64 __a) +{ + return (__m64)__builtin_ia32_pabsw((__v4hi)__a); +} + +/// Computes the absolute value of each of the packed 16-bit signed +/// integers in the source operand and stores the 16-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16]. +/// \returns A 128-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_abs_epi16(__m128i __a) +{ + return (__m128i)__builtin_elementwise_abs((__v8hi)__a); +} + +/// Computes the absolute value of each of the packed 32-bit signed +/// integers in the source operand and stores the 32-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PABSD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32]. +/// \returns A 64-bit integer vector containing the absolute values of the +/// elements in the operand. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_abs_pi32(__m64 __a) +{ + return (__m64)__builtin_ia32_pabsd((__v2si)__a); +} + +/// Computes the absolute value of each of the packed 32-bit signed +/// integers in the source operand and stores the 32-bit unsigned integer +/// results in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPABSD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32]. +/// \returns A 128-bit integer vector containing the absolute values of the +/// elements in the operand. 
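/* Illustrative usage sketch, not part of the upstream header: applying the
   packed absolute-value intrinsics to an ordinary array via the unaligned
   load/store intrinsics pulled in through <emmintrin.h>. */
#include <tmmintrin.h>

static void abs4_i32(const int *in, int *out)
{
  __m128i v = _mm_loadu_si128((const __m128i *)in);    /* load 4 x i32 */
  _mm_storeu_si128((__m128i *)out, _mm_abs_epi32(v));  /* store |x|; note that
                                                          INT32_MIN wraps */
}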
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_abs_epi32(__m128i __a) +{ + return (__m128i)__builtin_elementwise_abs((__v4si)__a); +} + +/// Concatenates the two 128-bit integer vector operands, and +/// right-shifts the result by the number of bytes specified in the immediate +/// operand. +/// +/// \headerfile +/// +/// \code +/// __m128i _mm_alignr_epi8(__m128i a, __m128i b, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the \c PALIGNR instruction. +/// +/// \param a +/// A 128-bit vector of [16 x i8] containing one of the source operands. +/// \param b +/// A 128-bit vector of [16 x i8] containing one of the source operands. +/// \param n +/// An immediate operand specifying how many bytes to right-shift the result. +/// \returns A 128-bit integer vector containing the concatenated right-shifted +/// value. +#define _mm_alignr_epi8(a, b, n) \ + ((__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), (n))) + +/// Concatenates the two 64-bit integer vector operands, and right-shifts +/// the result by the number of bytes specified in the immediate operand. +/// +/// \headerfile +/// +/// \code +/// __m64 _mm_alignr_pi8(__m64 a, __m64 b, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the \c PALIGNR instruction. +/// +/// \param a +/// A 64-bit vector of [8 x i8] containing one of the source operands. +/// \param b +/// A 64-bit vector of [8 x i8] containing one of the source operands. +/// \param n +/// An immediate operand specifying how many bytes to right-shift the result. +/// \returns A 64-bit integer vector containing the concatenated right-shifted +/// value. +#define _mm_alignr_pi8(a, b, n) \ + ((__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n))) + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 128-bit vectors of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal sums of +/// both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hadd_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 128-bit vectors of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [4 x i32] containing the horizontal sums of +/// both operands. 
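/* Illustrative usage sketch, not part of the upstream header: the horizontal
   add is commonly used to reduce a vector to a scalar sum by folding twice. */
#include <tmmintrin.h>

static int hsum4_i32(__m128i v)       /* v = [a, b, c, d] */
{
  v = _mm_hadd_epi32(v, v);           /* [a+b, c+d, a+b, c+d] */
  v = _mm_hadd_epi32(v, v);           /* [a+b+c+d, ...]       */
  return _mm_cvtsi128_si32(v);        /* extract lane 0       */
}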
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hadd_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 64-bit vectors of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHADDW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal sums of both +/// operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hadd_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phaddw((__v4hi)__a, (__v4hi)__b); +} + +/// Horizontally adds the adjacent pairs of values contained in 2 packed +/// 64-bit vectors of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHADDD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 64-bit vector of [2 x i32] containing the horizontal sums of both +/// operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hadd_pi32(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b); +} + +/// Horizontally adds, with saturation, the adjacent pairs of values contained +/// in two packed 128-bit vectors of [8 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHADDSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated +/// sums of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hadds_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Horizontally adds, with saturation, the adjacent pairs of values contained +/// in two packed 64-bit vectors of [4 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHADDSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the lower bits of the +/// destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal sums of the values are stored in the upper bits of the +/// destination. 
+/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated +/// sums of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hadds_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phaddsw((__v4hi)__a, (__v4hi)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 128-bit vectors of [8 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal differences +/// of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsub_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 128-bit vectors of [4 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [4 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 128-bit vector of [4 x i32] containing the horizontal differences +/// of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsub_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 64-bit vectors of [4 x i16]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHSUBW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal differences +/// of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hsub_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phsubw((__v4hi)__a, (__v4hi)__b); +} + +/// Horizontally subtracts the adjacent pairs of values contained in 2 +/// packed 64-bit vectors of [2 x i32]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHSUBD instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 64-bit vector of [2 x i32] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. 
+/// \returns A 64-bit vector of [2 x i32] containing the horizontal differences +/// of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hsub_pi32(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b); +} + +/// Horizontally subtracts, with saturation, the adjacent pairs of values +/// contained in two packed 128-bit vectors of [8 x i16]. +/// +/// Positive differences greater than 0x7FFF are saturated to 0x7FFF. +/// Negative differences less than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPHSUBSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated +/// differences of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsubs_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Horizontally subtracts, with saturation, the adjacent pairs of values +/// contained in two packed 64-bit vectors of [4 x i16]. +/// +/// Positive differences greater than 0x7FFF are saturated to 0x7FFF. +/// Negative differences less than 0x8000 are saturated to 0x8000. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PHSUBSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the lower bits of +/// the destination. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. The +/// horizontal differences between the values are stored in the upper bits of +/// the destination. +/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated +/// differences of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_hsubs_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_phsubsw((__v4hi)__a, (__v4hi)__b); +} + +/// Multiplies corresponding pairs of packed 8-bit unsigned integer +/// values contained in the first source operand and packed 8-bit signed +/// integer values contained in the second source operand, adds pairs of +/// contiguous products with signed saturation, and writes the 16-bit sums to +/// the corresponding bits in the destination. +/// +/// For example, bits [7:0] of both operands are multiplied, bits [15:8] of +/// both operands are multiplied, and the sum of both results is written to +/// bits [15:0] of the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMADDUBSW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the first source operand. +/// \param __b +/// A 128-bit integer vector containing the second source operand. 
+/// \returns A 128-bit integer vector containing the sums of products of both +/// operands: \n +/// \a R0 := (\a __a0 * \a __b0) + (\a __a1 * \a __b1) \n +/// \a R1 := (\a __a2 * \a __b2) + (\a __a3 * \a __b3) \n +/// \a R2 := (\a __a4 * \a __b4) + (\a __a5 * \a __b5) \n +/// \a R3 := (\a __a6 * \a __b6) + (\a __a7 * \a __b7) \n +/// \a R4 := (\a __a8 * \a __b8) + (\a __a9 * \a __b9) \n +/// \a R5 := (\a __a10 * \a __b10) + (\a __a11 * \a __b11) \n +/// \a R6 := (\a __a12 * \a __b12) + (\a __a13 * \a __b13) \n +/// \a R7 := (\a __a14 * \a __b14) + (\a __a15 * \a __b15) +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maddubs_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)__a, (__v16qi)__b); +} + +/// Multiplies corresponding pairs of packed 8-bit unsigned integer +/// values contained in the first source operand and packed 8-bit signed +/// integer values contained in the second source operand, adds pairs of +/// contiguous products with signed saturation, and writes the 16-bit sums to +/// the corresponding bits in the destination. +/// +/// For example, bits [7:0] of both operands are multiplied, bits [15:8] of +/// both operands are multiplied, and the sum of both results is written to +/// bits [15:0] of the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PMADDUBSW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the first source operand. +/// \param __b +/// A 64-bit integer vector containing the second source operand. +/// \returns A 64-bit integer vector containing the sums of products of both +/// operands: \n +/// \a R0 := (\a __a0 * \a __b0) + (\a __a1 * \a __b1) \n +/// \a R1 := (\a __a2 * \a __b2) + (\a __a3 * \a __b3) \n +/// \a R2 := (\a __a4 * \a __b4) + (\a __a5 * \a __b5) \n +/// \a R3 := (\a __a6 * \a __b6) + (\a __a7 * \a __b7) +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_maddubs_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pmaddubsw((__v8qi)__a, (__v8qi)__b); +} + +/// Multiplies packed 16-bit signed integer values, truncates the 32-bit +/// products to the 18 most significant bits by right-shifting, rounds the +/// truncated value by adding 1, and writes bits [16:1] to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPMULHRSW instruction. +/// +/// \param __a +/// A 128-bit vector of [8 x i16] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [8 x i16] containing one of the source operands. +/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled +/// products of both operands. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_mulhrs_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b); +} + +/// Multiplies packed 16-bit signed integer values, truncates the 32-bit +/// products to the 18 most significant bits by right-shifting, rounds the +/// truncated value by adding 1, and writes bits [16:1] to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PMULHRSW instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16] containing one of the source operands. +/// \param __b +/// A 64-bit vector of [4 x i16] containing one of the source operands. +/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled +/// products of both operands. 
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_mulhrs_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pmulhrsw((__v4hi)__a, (__v4hi)__b); +} + +/// Copies the 8-bit integers from a 128-bit integer vector to the +/// destination or clears 8-bit values in the destination, as specified by +/// the second source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSHUFB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control bytes corresponding to +/// positions in the destination: +/// Bit 7: \n +/// 1: Clear the corresponding byte in the destination. \n +/// 0: Copy the selected source byte to the corresponding byte in the +/// destination. \n +/// Bits [6:4] Reserved. \n +/// Bits [3:0] select the source byte to be copied. +/// \returns A 128-bit integer vector containing the copied or cleared values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shuffle_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_pshufb128((__v16qi)__a, (__v16qi)__b); +} + +/// Copies the 8-bit integers from a 64-bit integer vector to the +/// destination or clears 8-bit values in the destination, as specified by +/// the second source operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSHUFB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing control bytes corresponding to +/// positions in the destination: +/// Bit 7: \n +/// 1: Clear the corresponding byte in the destination. \n +/// 0: Copy the selected source byte to the corresponding byte in the +/// destination. \n +/// Bits [3:0] select the source byte to be copied. +/// \returns A 64-bit integer vector containing the copied or cleared values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_shuffle_pi8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pshufb((__v8qi)__a, (__v8qi)__b); +} + +/// For each 8-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the byte in the second source is negative, calculate the two's +/// complement of the corresponding byte in the first source, and write that +/// value to the destination. If the byte in the second source is positive, +/// copy the corresponding byte from the first source to the destination. If +/// the byte in the second source is zero, clear the corresponding byte in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGNB instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control bytes corresponding to +/// positions in the destination. +/// \returns A 128-bit integer vector containing the resultant values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sign_epi8(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_psignb128((__v16qi)__a, (__v16qi)__b); +} + +/// For each 16-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the word in the second source is negative, calculate the two's +/// complement of the corresponding word in the first source, and write that +/// value to the destination. 
If the word in the second source is positive, +/// copy the corresponding word from the first source to the destination. If +/// the word in the second source is zero, clear the corresponding word in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGNW instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control words corresponding to +/// positions in the destination. +/// \returns A 128-bit integer vector containing the resultant values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sign_epi16(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_psignw128((__v8hi)__a, (__v8hi)__b); +} + +/// For each 32-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the doubleword in the second source is negative, calculate the two's +/// complement of the corresponding word in the first source, and write that +/// value to the destination. If the doubleword in the second source is +/// positive, copy the corresponding word from the first source to the +/// destination. If the doubleword in the second source is zero, clear the +/// corresponding word in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c VPSIGND instruction. +/// +/// \param __a +/// A 128-bit integer vector containing the values to be copied. +/// \param __b +/// A 128-bit integer vector containing control doublewords corresponding to +/// positions in the destination. +/// \returns A 128-bit integer vector containing the resultant values. +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sign_epi32(__m128i __a, __m128i __b) +{ + return (__m128i)__builtin_ia32_psignd128((__v4si)__a, (__v4si)__b); +} + +/// For each 8-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the byte in the second source is negative, calculate the two's +/// complement of the corresponding byte in the first source, and write that +/// value to the destination. If the byte in the second source is positive, +/// copy the corresponding byte from the first source to the destination. If +/// the byte in the second source is zero, clear the corresponding byte in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSIGNB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing control bytes corresponding to +/// positions in the destination. +/// \returns A 64-bit integer vector containing the resultant values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_sign_pi8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_psignb((__v8qi)__a, (__v8qi)__b); +} + +/// For each 16-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the word in the second source is negative, calculate the two's +/// complement of the corresponding word in the first source, and write that +/// value to the destination. If the word in the second source is positive, +/// copy the corresponding word from the first source to the destination. If +/// the word in the second source is zero, clear the corresponding word in +/// the destination. 
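/* Illustrative usage sketch, not part of the upstream header: the sign
 * intrinsics transfer the sign of one vector onto another in a single
 * instruction, e.g. with the 128-bit word form defined above:
 *
 *   __m128i r = _mm_sign_epi16(a, b);
 *   // r[i] = b[i] < 0 ? -a[i] : (b[i] == 0 ? 0 : a[i])
 */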
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSIGNW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing control words corresponding to +/// positions in the destination. +/// \returns A 64-bit integer vector containing the resultant values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_sign_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_psignw((__v4hi)__a, (__v4hi)__b); +} + +/// For each 32-bit integer in the first source operand, perform one of +/// the following actions as specified by the second source operand. +/// +/// If the doubleword in the second source is negative, calculate the two's +/// complement of the corresponding doubleword in the first source, and +/// write that value to the destination. If the doubleword in the second +/// source is positive, copy the corresponding doubleword from the first +/// source to the destination. If the doubleword in the second source is +/// zero, clear the corresponding doubleword in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c PSIGND instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values to be copied. +/// \param __b +/// A 64-bit integer vector containing two control doublewords corresponding +/// to positions in the destination. +/// \returns A 64-bit integer vector containing the resultant values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_sign_pi32(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_psignd((__v2si)__a, (__v2si)__b); +} + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_MMX + +#endif /* __TMMINTRIN_H */ diff --git a/third_party/intel/clang/tsxldtrkintrin.h b/third_party/intel/clang/tsxldtrkintrin.h new file mode 100644 index 000000000..491823e93 --- /dev/null +++ b/third_party/intel/clang/tsxldtrkintrin.h @@ -0,0 +1,56 @@ +/*===------------- tsxldtrkintrin.h - tsxldtrk intrinsics ------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __TSXLDTRKINTRIN_H +#define __TSXLDTRKINTRIN_H + +/* Define the default attributes for the functions in this file */ +#define _DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("tsxldtrk"))) + +/// Marks the start of an TSX (RTM) suspend load address tracking region. If +/// this intrinsic is used inside a transactional region, subsequent loads +/// are not added to the read set of the transaction. If it's used inside a +/// suspend load address tracking region it will cause transaction abort. +/// If it's used outside of a transactional region it behaves like a NOP. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c XSUSLDTRK instruction. +/// +static __inline__ void _DEFAULT_FN_ATTRS +_xsusldtrk (void) +{ + __builtin_ia32_xsusldtrk(); +} + +/// Marks the end of an TSX (RTM) suspend load address tracking region. If this +/// intrinsic is used inside a suspend load address tracking region it will +/// end the suspend region and all following load addresses will be added to +/// the transaction read set. 
If it's used inside an active transaction but +/// not in a suspend region it will cause transaction abort. If it's used +/// outside of a transactional region it behaves like a NOP. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c XRESLDTRK instruction. +/// +static __inline__ void _DEFAULT_FN_ATTRS +_xresldtrk (void) +{ + __builtin_ia32_xresldtrk(); +} + +#undef _DEFAULT_FN_ATTRS + +#endif /* __TSXLDTRKINTRIN_H */ diff --git a/third_party/intel/clang/uintrintrin.h b/third_party/intel/clang/uintrintrin.h new file mode 100644 index 000000000..135dc814c --- /dev/null +++ b/third_party/intel/clang/uintrintrin.h @@ -0,0 +1,157 @@ +/*===------------------ uintrintrin.h - UINTR intrinsics -------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86GPRINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __UINTRINTRIN_H +#define __UINTRINTRIN_H + +/* Define the default attributes for the functions in this file */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("uintr"))) + +#ifdef __x86_64__ + +struct __uintr_frame +{ + unsigned long long rip; + unsigned long long rflags; + unsigned long long rsp; +}; + +/// Clears the user interrupt flag (UIF). Its effect takes place immediately: a +/// user interrupt cannot be delivered on the instruction boundary following +/// CLUI. Can be executed only if CR4.UINT = 1, the logical processor is in +/// 64-bit mode, and software is not executing inside an enclave; otherwise, +/// each causes an invalid-opcode exception. Causes a transactional abort if +/// executed inside a transactional region; the abort loads EAX as it would +/// had it been due to an execution of CLI. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CLUI instruction. +/// +/// \code{.operation} +/// UIF := 0 +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS +_clui (void) +{ + __builtin_ia32_clui(); +} + +/// Sets the user interrupt flag (UIF). Its effect takes place immediately; a +/// user interrupt may be delivered on the instruction boundary following +/// STUI. Can be executed only if CR4.UINT = 1, the logical processor is in +/// 64-bit mode, and software is not executing inside an enclave; otherwise, +/// each causes an invalid-opcode exception. Causes a transactional abort if +/// executed inside a transactional region; the abort loads EAX as it would +/// had it been due to an execution of STI. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the STUI instruction. +/// +/// \code{.operation} +/// UIF := 1 +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS +_stui (void) +{ + __builtin_ia32_stui(); +} + +/// Get the current value of the user interrupt flag (UIF). Can be executed +/// regardless of CPL and inside a transactional region. Can be executed only +/// if CR4.UINT = 1, the logical processor is in 64-bit mode, and software is +/// not executing inside an enclave; otherwise, it causes an invalid-opcode +/// exception. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the TESTUI instruction. +/// +/// \returns The current value of the user interrupt flag (UIF). 
+/// +/// \code{.operation} +/// CF := UIF +/// ZF := 0 +/// AF := 0 +/// OF := 0 +/// PF := 0 +/// SF := 0 +/// dst := CF +/// \endcode +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_testui (void) +{ + return __builtin_ia32_testui(); +} + +/// Send interprocessor user interrupt. Can be executed only if +/// CR4.UINT = IA32_UINT_TT[0] = 1, the logical processor is in 64-bit mode, +/// and software is not executing inside an enclave; otherwise, it causes an +/// invalid-opcode exception. May be executed at any privilege level, all of +/// its memory accesses are performed with supervisor privilege. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the SENDUIPI instruction +/// +/// \param __a +/// Index of user-interrupt target table entry in user-interrupt target +/// table. +/// +/// \code{.operation} +/// IF __a > UITTSZ +/// GP (0) +/// FI +/// tempUITTE := MEM[UITTADDR + (a<<4)] +/// // tempUITTE must be valid, and can't have any reserved bit set +/// IF (tempUITTE.V == 0 OR tempUITTE[7:1] != 0) +/// GP (0) +/// FI +/// tempUPID := MEM[tempUITTE.UPIDADDR] // under lock +/// // tempUPID can't have any reserved bit set +/// IF (tempUPID[15:2] != 0 OR tempUPID[31:24] != 0) +/// GP (0) // release lock +/// FI +/// tempUPID.PIR[tempUITTE.UV] := 1; +/// IF (tempUPID.SN == 0 AND tempUPID.ON == 0) +/// tempUPID.ON := 1 +/// sendNotify := 1 +/// ELSE +/// sendNotify := 0 +/// FI +/// MEM[tempUITTE.UPIDADDR] := tempUPID // release lock +/// IF sendNotify == 1 +/// IF IA32_APIC_BASE[10] == 1 // local APIC is in x2APIC mode +/// // send ordinary IPI with vector tempUPID.NV to 32-bit physical APIC +/// // ID tempUPID.NDST +/// SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST) +/// ELSE +/// // send ordinary IPI with vector tempUPID.NV to 8-bit physical APIC +/// // ID tempUPID.NDST[15:8] +/// SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST[15:8]) +/// FI +/// FI +/// \endcode +static __inline__ void __DEFAULT_FN_ATTRS +_senduipi (unsigned long long __a) +{ + __builtin_ia32_senduipi(__a); +} + +#endif /* __x86_64__ */ + +#undef __DEFAULT_FN_ATTRS + +#endif /* __UINTRINTRIN_H */ diff --git a/third_party/intel/clang/usermsrintrin.h b/third_party/intel/clang/usermsrintrin.h new file mode 100644 index 000000000..613883767 --- /dev/null +++ b/third_party/intel/clang/usermsrintrin.h @@ -0,0 +1,51 @@ +/*===--------------- usermsrintrin.h - USERMSR intrinsics -----------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __X86GPRINTRIN_H +#error "Never use directly; include instead." +#endif // __X86GPRINTRIN_H + +#ifndef __USERMSRINTRIN_H +#define __USERMSRINTRIN_H +#ifdef __x86_64__ + +/// Reads the contents of a 64-bit MSR specified in \a __A into \a dst. +/// +/// This intrinsic corresponds to the URDMSR instruction. +/// \param __A +/// An unsigned long long. +/// +/// \code{.operation} +/// DEST := MSR[__A] +/// \endcode +static __inline__ unsigned long long + __attribute__((__always_inline__, __nodebug__, __target__("usermsr"))) + _urdmsr(unsigned long long __A) { + return __builtin_ia32_urdmsr(__A); +} + +/// Writes the contents of \a __B into the 64-bit MSR specified in \a __A. +/// +/// This intrinsic corresponds to the UWRMSR instruction. +/// +/// \param __A +/// An unsigned long long. 
+/// \param __B +/// An unsigned long long. +/// +/// \code{.operation} +/// MSR[__A] := __B +/// \endcode +static __inline__ void + __attribute__((__always_inline__, __nodebug__, __target__("usermsr"))) + _uwrmsr(unsigned long long __A, unsigned long long __B) { + return __builtin_ia32_uwrmsr(__A, __B); +} + +#endif // __x86_64__ +#endif // __USERMSRINTRIN_H diff --git a/third_party/intel/clang/vaesintrin.h b/third_party/intel/clang/vaesintrin.h new file mode 100644 index 000000000..d7c162f5c --- /dev/null +++ b/third_party/intel/clang/vaesintrin.h @@ -0,0 +1,87 @@ +/*===------------------ vaesintrin.h - VAES intrinsics ---------------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __VAESINTRIN_H +#define __VAESINTRIN_H + +/* Default attributes for YMM forms. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("vaes"), __min_vector_width__(256))) + +/* Default attributes for ZMM forms. */ +#define __DEFAULT_FN_ATTRS_F \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("avx512f,evex512,vaes"), \ + __min_vector_width__(512))) + +static __inline__ __m256i __DEFAULT_FN_ATTRS + _mm256_aesenc_epi128(__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_aesenc256((__v4di) __A, + (__v4di) __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS + _mm256_aesdec_epi128(__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_aesdec256((__v4di) __A, + (__v4di) __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS + _mm256_aesenclast_epi128(__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_aesenclast256((__v4di) __A, + (__v4di) __B); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS + _mm256_aesdeclast_epi128(__m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_aesdeclast256((__v4di) __A, + (__v4di) __B); +} + +#ifdef __AVX512FINTRIN_H +static __inline__ __m512i __DEFAULT_FN_ATTRS_F + _mm512_aesenc_epi128(__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_aesenc512((__v8di) __A, + (__v8di) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS_F + _mm512_aesdec_epi128(__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_aesdec512((__v8di) __A, + (__v8di) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS_F + _mm512_aesenclast_epi128(__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_aesenclast512((__v8di) __A, + (__v8di) __B); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS_F + _mm512_aesdeclast_epi128(__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_aesdeclast512((__v8di) __A, + (__v8di) __B); +} +#endif // __AVX512FINTRIN_H + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_F + +#endif // __VAESINTRIN_H diff --git a/third_party/intel/clang/vpclmulqdqintrin.h b/third_party/intel/clang/vpclmulqdqintrin.h new file mode 100644 index 000000000..485692ea2 --- /dev/null +++ b/third_party/intel/clang/vpclmulqdqintrin.h @@ -0,0 +1,30 @@ +/*===------------ vpclmulqdqintrin.h - VPCLMULQDQ intrinsics ---------------=== + * + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __VPCLMULQDQINTRIN_H +#define __VPCLMULQDQINTRIN_H + +#define _mm256_clmulepi64_epi128(A, B, I) \ + ((__m256i)__builtin_ia32_pclmulqdq256((__v4di)(__m256i)(A), \ + (__v4di)(__m256i)(B), \ + (char)(I))) + +#ifdef __AVX512FINTRIN_H +#define _mm512_clmulepi64_epi128(A, B, I) \ + ((__m512i)__builtin_ia32_pclmulqdq512((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), \ + (char)(I))) +#endif // __AVX512FINTRIN_H + +#endif /* __VPCLMULQDQINTRIN_H */ + diff --git a/third_party/intel/clang/waitpkgintrin.h b/third_party/intel/clang/waitpkgintrin.h new file mode 100644 index 000000000..7ecada4cf --- /dev/null +++ b/third_party/intel/clang/waitpkgintrin.h @@ -0,0 +1,42 @@ +/*===----------------------- waitpkgintrin.h - WAITPKG --------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __WAITPKGINTRIN_H +#define __WAITPKGINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("waitpkg"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_umonitor (void * __address) +{ + __builtin_ia32_umonitor (__address); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_umwait (unsigned int __control, unsigned long long __counter) +{ + return __builtin_ia32_umwait (__control, + (unsigned int)(__counter >> 32), (unsigned int)__counter); +} + +static __inline__ unsigned char __DEFAULT_FN_ATTRS +_tpause (unsigned int __control, unsigned long long __counter) +{ + return __builtin_ia32_tpause (__control, + (unsigned int)(__counter >> 32), (unsigned int)__counter); +} + +#undef __DEFAULT_FN_ATTRS + +#endif /* __WAITPKGINTRIN_H */ diff --git a/third_party/intel/clang/wbnoinvdintrin.h b/third_party/intel/clang/wbnoinvdintrin.h new file mode 100644 index 000000000..cac0347ef --- /dev/null +++ b/third_party/intel/clang/wbnoinvdintrin.h @@ -0,0 +1,24 @@ +/*===-------------- wbnoinvdintrin.h - wbnoinvd intrinsic-------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H +#error "Never use directly; include instead." 
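/* Illustrative usage sketch, not part of the upstream header: the WAITPKG
 * intrinsics defined above pair an address monitor with a timed low-power
 * wait.  Assuming a flag the waiter polls and an absolute TSC deadline:
 *
 *   _umonitor((void *)&flag);            // arm the address monitor
 *   if (!flag)
 *     _umwait(0, __rdtsc() + 100000);    // doze in C0.2 until a write to
 *                                        // flag or the TSC deadline
 */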
+#endif + +#ifndef __WBNOINVDINTRIN_H +#define __WBNOINVDINTRIN_H + +static __inline__ void + __attribute__((__always_inline__, __nodebug__, __target__("wbnoinvd"))) +_wbnoinvd (void) +{ + __builtin_ia32_wbnoinvd (); +} + +#endif /* __WBNOINVDINTRIN_H */ diff --git a/third_party/intel/clang/wmmintrin.h b/third_party/intel/clang/wmmintrin.h new file mode 100644 index 000000000..f3121e1c3 --- /dev/null +++ b/third_party/intel/clang/wmmintrin.h @@ -0,0 +1,23 @@ +/*===---- wmmintrin.h - AES intrinsics ------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __WMMINTRIN_H +#define __WMMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +#include "emmintrin.h" + +#include "__wmmintrin_aes.h" + +#include "__wmmintrin_pclmul.h" + +#endif /* __WMMINTRIN_H */ diff --git a/third_party/intel/clang/x86gprintrin.h b/third_party/intel/clang/x86gprintrin.h new file mode 100644 index 000000000..f8447ed4a --- /dev/null +++ b/third_party/intel/clang/x86gprintrin.h @@ -0,0 +1,63 @@ +/*===--------------- x86gprintrin.h - X86 GPR intrinsics ------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86GPRINTRIN_H +#define __X86GPRINTRIN_H + +#if !defined(__SCE__) || __has_feature(modules) || defined(__HRESET__) +#include "hresetintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__UINTR__) +#include "uintrintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__USERMSR__) +#include "usermsrintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__CRC32__) +#include "crc32intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHI__) +#include "prfchiintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__RAOINT__) +#include "raointintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__CMPCCXADD__) +#include "cmpccxaddintrin.h" +#endif + +#if defined(__i386__) +#define __SAVE_GPRBX "mov {%%ebx, %%eax |eax, ebx};" +#define __RESTORE_GPRBX "mov {%%eax, %%ebx |ebx, eax};" +#define __TMPGPR "eax" +#else +// When in 64-bit target, the 32-bit operands generate a 32-bit result, +// zero-extended to a 64-bit result in the destination general-purpose, +// It means "mov x %ebx" will clobber the higher 32 bits of rbx, so we +// should preserve the 64-bit register rbx. 
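/* Illustrative usage sketch, not part of the upstream header: the __SSC_MARK
 * macro defined below emits a magic no-op that binary-analysis tools such as
 * Intel SDE recognize; by convention 0x111/0x222 are often used to bracket a
 * region of interest (the marker values here are only an assumed example):
 *
 *   __SSC_MARK(0x111);   // begin region of interest
 *   workload();          // hypothetical code under analysis
 *   __SSC_MARK(0x222);   // end region of interest
 */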
+#define __SAVE_GPRBX "mov {%%rbx, %%rax |rax, rbx};" +#define __RESTORE_GPRBX "mov {%%rax, %%rbx |rbx, rax};" +#define __TMPGPR "rax" +#endif + +#define __SSC_MARK(__Tag) \ + __asm__ __volatile__( __SAVE_GPRBX \ + "mov {%0, %%ebx|ebx, %0}; " \ + ".byte 0x64, 0x67, 0x90; " \ + __RESTORE_GPRBX \ + ::"i"(__Tag) \ + : __TMPGPR ); + +#endif /* __X86GPRINTRIN_H */ diff --git a/third_party/intel/clang/x86intrin.h b/third_party/intel/clang/x86intrin.h new file mode 100644 index 000000000..ceae912cf --- /dev/null +++ b/third_party/intel/clang/x86intrin.h @@ -0,0 +1,53 @@ +/*===---- x86intrin.h - X86 intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86INTRIN_H +#define __X86INTRIN_H + +#include "ia32intrin.h" + +#include "immintrin.h" + +#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHW__) +#include "prfchwintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE4A__) +#include "ammintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA4__) +#include "fma4intrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__XOP__) +#include "xopintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__TBM__) +#include "tbmintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__LWP__) +#include "lwpintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__MWAITX__) +#include "mwaitxintrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLZERO__) +#include "clzerointrin.h" +#endif + +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPRU__) +#include "rdpruintrin.h" +#endif + +#endif /* __X86INTRIN_H */ diff --git a/third_party/intel/clang/xmmintrin.h b/third_party/intel/clang/xmmintrin.h new file mode 100644 index 000000000..6a371c48f --- /dev/null +++ b/third_party/intel/clang/xmmintrin.h @@ -0,0 +1,3207 @@ +/*===---- xmmintrin.h - SSE intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __XMMINTRIN_H +#define __XMMINTRIN_H + +#if !defined(__i386__) && !defined(__x86_64__) +#error "This header is only meant to be used on x86 and x64 architecture" +#endif + +#include "mmintrin.h" + +typedef int __v4si __attribute__((__vector_size__(16))); +typedef float __v4sf __attribute__((__vector_size__(16))); +typedef float __m128 __attribute__((__vector_size__(16), __aligned__(16))); + +typedef float __m128_u __attribute__((__vector_size__(16), __aligned__(1))); + +/* Unsigned types */ +typedef unsigned int __v4su __attribute__((__vector_size__(16))); + +/* This header should only be included in a hosted environment as it depends on + * a standard library to provide allocation routines. */ +#if __STDC_HOSTED__ +#include "mm_malloc.h" +#endif + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("sse,no-evex512"), \ + __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS_MMX \ + __attribute__((__always_inline__, __nodebug__, \ + __target__("mmx,sse,no-evex512"), __min_vector_width__(64))) + +/// Adds the 32-bit float values in the low-order bits of the operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDSS / ADDSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The lower 32 bits of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The lower 32 bits of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the sum +/// of the lower 32 bits of both operands. The upper 96 bits are copied from +/// the upper 96 bits of the first source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_add_ss(__m128 __a, __m128 __b) +{ + __a[0] += __b[0]; + return __a; +} + +/// Adds two 128-bit vectors of [4 x float], and returns the results of +/// the addition. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VADDPS / ADDPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the sums of both +/// operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_add_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4sf)__a + (__v4sf)__b); +} + +/// Subtracts the 32-bit float value in the low-order bits of the second +/// operand from the corresponding value in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBSS / SUBSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the minuend. The lower 32 bits +/// of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing the subtrahend. The lower 32 +/// bits of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// difference of the lower 32 bits of both operands. The upper 96 bits are +/// copied from the upper 96 bits of the first source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_sub_ss(__m128 __a, __m128 __b) +{ + __a[0] -= __b[0]; + return __a; +} + +/// Subtracts each of the values of the second operand from the first +/// operand, both of which are 128-bit vectors of [4 x float] and returns +/// the results of the subtraction. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSUBPS / SUBPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the minuend. +/// \param __b +/// A 128-bit vector of [4 x float] containing the subtrahend. +/// \returns A 128-bit vector of [4 x float] containing the differences between +/// both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_sub_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4sf)__a - (__v4sf)__b); +} + +/// Multiplies two 32-bit float values in the low-order bits of the +/// operands. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULSS / MULSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. 
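A short sketch contrasting the packed and scalar adds documented above; the helper name and the unaligned loads/stores are illustrative assumptions rather than anything defined by this header:

#include <xmmintrin.h>

/* out[i] = a[i] + b[i] for four floats at a time. */
static void add4(float out[4], const float a[4], const float b[4])
{
  __m128 va = _mm_loadu_ps(a);
  __m128 vb = _mm_loadu_ps(b);
  _mm_storeu_ps(out, _mm_add_ps(va, vb));   /* all four lanes */
  /* _mm_add_ss(va, vb) would instead add only lane 0 and pass va's
     upper three lanes through unchanged. */
}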
+/// The lower 32 bits of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// The lower 32 bits of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the product of the lower +/// 32 bits of both operands. The upper 96 bits are copied from the upper 96 +/// bits of the first source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mul_ss(__m128 __a, __m128 __b) +{ + __a[0] *= __b[0]; + return __a; +} + +/// Multiplies two 128-bit vectors of [4 x float] and returns the +/// results of the multiplication. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMULPS / MULPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the products of both +/// operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_mul_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4sf)__a * (__v4sf)__b); +} + +/// Divides the value in the low-order 32 bits of the first operand by +/// the corresponding value in the second operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVSS / DIVSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the dividend. The lower 32 +/// bits of this operand are used in the calculation. +/// \param __b +/// A 128-bit vector of [4 x float] containing the divisor. The lower 32 bits +/// of this operand are used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the quotients of the +/// lower 32 bits of both operands. The upper 96 bits are copied from the +/// upper 96 bits of the first source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_div_ss(__m128 __a, __m128 __b) +{ + __a[0] /= __b[0]; + return __a; +} + +/// Divides two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VDIVPS / DIVPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the dividend. +/// \param __b +/// A 128-bit vector of [4 x float] containing the divisor. +/// \returns A 128-bit vector of [4 x float] containing the quotients of both +/// operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_div_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4sf)__a / (__v4sf)__b); +} + +/// Calculates the square root of the value stored in the low-order bits +/// of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTSS / SQRTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the square root of the +/// value in the low-order bits of the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_sqrt_ss(__m128 __a) +{ + return (__m128)__builtin_ia32_sqrtss((__v4sf)__a); +} + +/// Calculates the square roots of the values stored in a 128-bit vector +/// of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSQRTPS / SQRTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the square roots of the +/// values in the operand. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_sqrt_ps(__m128 __a) +{ + return __builtin_ia32_sqrtps((__v4sf)__a); +} + +/// Calculates the approximate reciprocal of the value stored in the +/// low-order bits of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRCPSS / RCPSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocal of the value in the low-order bits of the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rcp_ss(__m128 __a) +{ + return (__m128)__builtin_ia32_rcpss((__v4sf)__a); +} + +/// Calculates the approximate reciprocals of the values stored in a +/// 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRCPPS / RCPPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocals of the values in the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rcp_ps(__m128 __a) +{ + return (__m128)__builtin_ia32_rcpps((__v4sf)__a); +} + +/// Calculates the approximate reciprocal of the square root of the value +/// stored in the low-order bits of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRSQRTSS / RSQRTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the calculation. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocal of the square root of the value in the low-order bits of the +/// operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rsqrt_ss(__m128 __a) +{ + return __builtin_ia32_rsqrtss((__v4sf)__a); +} + +/// Calculates the approximate reciprocals of the square roots of the +/// values stored in a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VRSQRTPS / RSQRTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the approximate +/// reciprocals of the square roots of the values in the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_rsqrt_ps(__m128 __a) +{ + return __builtin_ia32_rsqrtps((__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands and returns the lesser value in the low-order bits of the +/// vector of [4 x float]. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINSS / MINSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// minimum value between both operands. The upper 96 bits are copied from +/// the upper 96 bits of the first source operand. 
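Because _mm_rcp_ps and _mm_rsqrt_ps above return only rough (roughly 12-bit) approximations, they are usually paired with one Newton-Raphson refinement step when more accuracy is needed. A sketch of that idiom for the reciprocal square root; the helper name is an assumption, and the refinement y1 = y0 * (1.5 - 0.5 * x * y0 * y0) is the standard formula:

#include <xmmintrin.h>

/* Approximate 1/sqrt(x) per lane, then refine with one Newton step. */
static __m128 rsqrt_refined(__m128 x)
{
  const __m128 half  = _mm_set1_ps(0.5f);
  const __m128 onep5 = _mm_set1_ps(1.5f);
  __m128 y   = _mm_rsqrt_ps(x);                      /* rough estimate */
  __m128 xy2 = _mm_mul_ps(x, _mm_mul_ps(y, y));      /* x * y0^2       */
  return _mm_mul_ps(y, _mm_sub_ps(onep5, _mm_mul_ps(half, xy2)));
}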
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_min_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 128-bit vectors of [4 x float] and returns the lesser +/// of each pair of values. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMINPS / MINPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \returns A 128-bit vector of [4 x float] containing the minimum values +/// between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_min_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_minps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands and returns the greater value in the low-order bits of a 128-bit +/// vector of [4 x float]. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXSS / MAXSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// maximum value between both operands. The upper 96 bits are copied from +/// the upper 96 bits of the first source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_max_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_maxss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 128-bit vectors of [4 x float] and returns the greater +/// of each pair of values. +/// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMAXPS / MAXPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. +/// \returns A 128-bit vector of [4 x float] containing the maximum values +/// between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_max_ps(__m128 __a, __m128 __b) +{ + return __builtin_ia32_maxps((__v4sf)__a, (__v4sf)__b); +} + +/// Performs a bitwise AND of two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDPS / ANDPS instructions. +/// +/// \param __a +/// A 128-bit vector containing one of the source operands. +/// \param __b +/// A 128-bit vector containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the +/// values between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_and_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4su)__a & (__v4su)__b); +} + +/// Performs a bitwise AND of two 128-bit vectors of [4 x float], using +/// the one's complement of the values contained in the first source +/// operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VANDNPS / ANDNPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing the first source operand. The +/// one's complement of this value is used in the bitwise AND. 
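The min/max pair above composes into the usual branchless per-lane clamp; a minimal sketch (the function name and bounds are illustrative):

#include <xmmintrin.h>

/* Clamp each lane of x into [lo, hi]. Per the documented NaN rule, when a
   lane of x is NaN, _mm_max_ps returns the lane from its second operand,
   so NaN inputs come out as the corresponding lane of lo. */
static __m128 clamp_ps(__m128 x, __m128 lo, __m128 hi)
{
  return _mm_min_ps(_mm_max_ps(x, lo), hi);
}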
+/// \param __b +/// A 128-bit vector of [4 x float] containing the second source operand. +/// \returns A 128-bit vector of [4 x float] containing the bitwise AND of the +/// one's complement of the first operand and the values in the second +/// operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_andnot_ps(__m128 __a, __m128 __b) +{ + return (__m128)(~(__v4su)__a & (__v4su)__b); +} + +/// Performs a bitwise OR of two 128-bit vectors of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VORPS / ORPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the bitwise OR of the +/// values between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_or_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4su)__a | (__v4su)__b); +} + +/// Performs a bitwise exclusive OR of two 128-bit vectors of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the source operands. +/// \returns A 128-bit vector of [4 x float] containing the bitwise exclusive OR +/// of the values between both operands. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_xor_ps(__m128 __a, __m128 __b) +{ + return (__m128)((__v4su)__a ^ (__v4su)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands for equality. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector [4 x float]. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQSS / CMPEQSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpeq_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpeqss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPEQPS / CMPEQPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpeq_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpeqps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is less than the +/// corresponding value in the second operand. 
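A common use of the bitwise helpers above is sign-bit manipulation, since -0.0f carries only the sign bit; a minimal sketch (helper names are illustrative):

#include <xmmintrin.h>

/* |x| per lane: andnot clears the sign bit (~mask & x with mask = -0.0f). */
static __m128 abs_ps(__m128 x)
{
  return _mm_andnot_ps(_mm_set1_ps(-0.0f), x);
}

/* -x per lane: xor flips the sign bit. */
static __m128 neg_ps(__m128 x)
{
  return _mm_xor_ps(x, _mm_set1_ps(-0.0f));
}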
+/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSS / CMPLTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmplt_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpltss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are less than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmplt_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpltps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is less than or +/// equal to the corresponding value in the second operand. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true, in +/// the low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESS / CMPLESS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmple_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpless((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are less than or equal to those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmple_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpleps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is greater than +/// the corresponding value in the second operand. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTSS / CMPLTSS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpgt_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpltss((__v4sf)__b, (__v4sf)__a), + 4, 1, 2, 3); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpgt_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpltps((__v4sf)__b, (__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is greater than +/// or equal to the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLESS / CMPLESS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpge_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpless((__v4sf)__b, (__v4sf)__a), + 4, 1, 2, 3); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are greater than or equal to those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpge_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both operands +/// for inequality. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQSS / CMPNEQSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpneq_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpneqss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] for inequality. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNEQPS / CMPNEQPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpneq_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpneqps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not less than +/// the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSS / CMPNLTSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnlt_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnltss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not less than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnlt_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnltps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not less than +/// or equal to the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESS / CMPNLESS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnle_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnless((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not less than or equal to those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnle_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnleps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not greater +/// than the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTSS / CMPNLTSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. 
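The packed compares above produce all-ones/all-zeros lane masks rather than a single boolean, and they are typically combined with the bitwise operations earlier in this header to build a branchless per-lane select. A sketch under those assumptions (the helper name is illustrative):

#include <xmmintrin.h>

/* Per lane: (a < b) ? x : y, using the mask returned by _mm_cmplt_ps. */
static __m128 select_lt(__m128 a, __m128 b, __m128 x, __m128 y)
{
  __m128 m = _mm_cmplt_ps(a, b);            /* 0xFFFFFFFF where a < b   */
  return _mm_or_ps(_mm_and_ps(m, x),        /* keep x in the true lanes */
                   _mm_andnot_ps(m, y));    /* keep y elsewhere         */
}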
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpngt_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpnltss((__v4sf)__b, (__v4sf)__a), + 4, 1, 2, 3); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpngt_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnltps((__v4sf)__b, (__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is not greater +/// than or equal to the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLESS / CMPNLESS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnge_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_shufflevector((__v4sf)__a, + (__v4sf)__builtin_ia32_cmpnless((__v4sf)__b, (__v4sf)__a), + 4, 1, 2, 3); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are not greater than or equal to those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpnge_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpnleps((__v4sf)__b, (__v4sf)__a); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is ordered with +/// respect to the corresponding value in the second operand. +/// +/// A pair of floating-point values are ordered with respect to each +/// other if neither value is a NaN. Each comparison returns 0x0 for false, +/// 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDSS / CMPORDSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. 
The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpord_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpordss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are ordered with respect to those in the second operand. +/// +/// A pair of floating-point values are ordered with respect to each +/// other if neither value is a NaN. Each comparison returns 0x0 for false, +/// 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPORDPS / CMPORDPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpord_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpordps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the value in the first operand is unordered +/// with respect to the corresponding value in the second operand. +/// +/// A pair of double-precision values are unordered with respect to each +/// other if one or both values are NaN. Each comparison returns 0x0 for +/// false, 0xFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDSS / CMPUNORDSS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float] containing one of the operands. The lower +/// 32 bits of this operand are used in the comparison. +/// \returns A 128-bit vector of [4 x float] containing the comparison results +/// in the low-order bits. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpunord_ss(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpunordss((__v4sf)__a, (__v4sf)__b); +} + +/// Compares each of the corresponding 32-bit float values of the +/// 128-bit vectors of [4 x float] to determine if the values in the first +/// operand are unordered with respect to those in the second operand. +/// +/// A pair of double-precision values are unordered with respect to each +/// other if one or both values are NaN. Each comparison returns 0x0 for +/// false, 0xFFFFFFFFFFFFFFFF for true. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCMPUNORDPS / CMPUNORDPS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cmpunord_ps(__m128 __a, __m128 __b) +{ + return (__m128)__builtin_ia32_cmpunordps((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands for equality. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. 
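One practical corollary of the ordered compare above: a lane compares ordered with itself exactly when it is not NaN, which gives a cheap NaN scrubber. A minimal sketch (the helper name and the choice of 0.0f as the replacement are assumptions):

#include <xmmintrin.h>

/* Replace NaN lanes with 0.0f; cmpord(x, x) is all-ones only for non-NaN lanes. */
static __m128 zero_nans(__m128 x)
{
  return _mm_and_ps(_mm_cmpord_ps(x, x), x);
}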
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comieq_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comieq((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is less than the second +/// operand. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comilt_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comilt((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is less than or equal to the +/// second operand. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comile_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comile((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is greater than the second +/// operand. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comigt_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comigt((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is greater than or equal to +/// the second operand. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. 
+/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comige_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comige((__v4sf)__a, (__v4sf)__b); +} + +/// Compares two 32-bit float values in the low-order bits of both +/// operands to determine if the first operand is not equal to the second +/// operand. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCOMISS / COMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_comineq_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_comineq((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine equality. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomieq_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomieq((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// less than the second operand. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomilt_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomilt((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// less than or equal to the second operand. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. 
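Unlike the _mm_cmp*_ss family, the COMISS/UCOMISS wrappers above return a plain int, so they can drive ordinary control flow on the low lane. A small sketch (the helper name is illustrative; per the documentation above, comparisons involving NaN report 0):

#include <xmmintrin.h>

/* Return the smaller of the two low lanes as a scalar float. */
static float min_low_lane(__m128 a, __m128 b)
{
  if (_mm_comile_ss(a, b))        /* 1 when a[0] <= b[0], 0 otherwise or on NaN */
    return _mm_cvtss_f32(a);
  return _mm_cvtss_f32(b);
}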
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomile_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomile((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// greater than the second operand. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomigt_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomigt((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine if the first operand is +/// greater than or equal to the second operand. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomige_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomige((__v4sf)__a, (__v4sf)__b); +} + +/// Performs an unordered comparison of two 32-bit float values using +/// the low-order bits of both operands to determine inequality. +/// +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUCOMISS / UCOMISS instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \param __b +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the comparison. +/// \returns An integer containing the comparison results. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_ucomineq_ss(__m128 __a, __m128 __b) +{ + return __builtin_ia32_ucomineq((__v4sf)__a, (__v4sf)__b); +} + +/// Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvtss_si32(__m128 __a) +{ + return __builtin_ia32_cvtss2si((__v4sf)__a); +} + +/// Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 32-bit integer. 
+/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvt_ss2si(__m128 __a) +{ + return _mm_cvtss_si32(__a); +} + +#ifdef __x86_64__ + +/// Converts a float value contained in the lower 32 bits of a vector of +/// [4 x float] into a 64-bit integer. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 64-bit integer containing the converted value. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvtss_si64(__m128 __a) +{ + return __builtin_ia32_cvtss2si64((__v4sf)__a); +} + +#endif + +/// Converts two low-order float values in a 128-bit vector of +/// [4 x float] into a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvtps_pi32(__m128 __a) +{ + return (__m64)__builtin_ia32_cvtps2pi((__v4sf)__a); +} + +/// Converts two low-order float values in a 128-bit vector of +/// [4 x float] into a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvt_ps2pi(__m128 __a) +{ + return _mm_cvtps_pi32(__a); +} + +/// Converts the lower (first) element of a vector of [4 x float] into a signed +/// truncated (rounded toward zero) 32-bit integer. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvttss_si32(__m128 __a) +{ + return __builtin_ia32_cvttss2si((__v4sf)__a); +} + +/// Converts the lower (first) element of a vector of [4 x float] into a signed +/// truncated (rounded toward zero) 32-bit integer. 
+/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 32-bit integer containing the converted value. +static __inline__ int __DEFAULT_FN_ATTRS +_mm_cvtt_ss2si(__m128 __a) +{ + return _mm_cvttss_si32(__a); +} + +#ifdef __x86_64__ +/// Converts the lower (first) element of a vector of [4 x float] into a signed +/// truncated (rounded toward zero) 64-bit integer. +/// +/// If the converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTTSS2SI / CVTTSS2SI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the conversion. +/// \returns A 64-bit integer containing the converted value. +static __inline__ long long __DEFAULT_FN_ATTRS +_mm_cvttss_si64(__m128 __a) +{ + return __builtin_ia32_cvttss2si64((__v4sf)__a); +} +#endif + +/// Converts the lower (first) two elements of a 128-bit vector of [4 x float] +/// into two signed truncated (rounded toward zero) 32-bit integers, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTTPS2PI / VTTPS2PI +/// instructions. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvttps_pi32(__m128 __a) +{ + return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a); +} + +/// Converts the lower (first) two elements of a 128-bit vector of [4 x float] +/// into two signed truncated (rounded toward zero) 64-bit integers, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTTPS2PI instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \returns A 64-bit integer vector containing the converted values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvtt_ps2pi(__m128 __a) +{ + return _mm_cvttps_pi32(__a); +} + +/// Converts a 32-bit signed integer value into a floating point value +/// and writes it to the lower 32 bits of the destination. The remaining +/// higher order elements of the destination vector are copied from the +/// corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 32-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// converted value of the second operand. The upper 96 bits are copied from +/// the upper 96 bits of the first operand. 
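A small sketch contrasting the two conversion flavours documented above: _mm_cvtss_si32 honours the current rounding mode (round-to-nearest-even by default), while _mm_cvttss_si32 always truncates toward zero. The sample value is an assumption for illustration.

#include <xmmintrin.h>

static void conversion_demo(void)
{
  __m128 v = _mm_set_ss(2.7f);
  int nearest   = _mm_cvtss_si32(v);    /* 3 under the default rounding mode */
  int truncated = _mm_cvttss_si32(v);   /* 2, always rounds toward zero      */
  (void)nearest;
  (void)truncated;
}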
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtsi32_ss(__m128 __a, int __b) +{ + __a[0] = __b; + return __a; +} + +/// Converts a 32-bit signed integer value into a floating point value +/// and writes it to the lower 32 bits of the destination. The remaining +/// higher order elements of the destination are copied from the +/// corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 32-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// converted value of the second operand. The upper 96 bits are copied from +/// the upper 96 bits of the first operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvt_si2ss(__m128 __a, int __b) +{ + return _mm_cvtsi32_ss(__a, __b); +} + +#ifdef __x86_64__ + +/// Converts a 64-bit signed integer value into a floating point value +/// and writes it to the lower 32 bits of the destination. The remaining +/// higher order elements of the destination are copied from the +/// corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VCVTSI2SS / CVTSI2SS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 64-bit signed integer operand containing the value to be converted. +/// \returns A 128-bit vector of [4 x float] whose lower 32 bits contain the +/// converted value of the second operand. The upper 96 bits are copied from +/// the upper 96 bits of the first operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_cvtsi64_ss(__m128 __a, long long __b) +{ + __a[0] = __b; + return __a; +} + +#endif + +/// Converts two elements of a 64-bit vector of [2 x i32] into two +/// floating point values and writes them to the lower 64-bits of the +/// destination. The remaining higher order elements of the destination are +/// copied from the corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 64-bit vector of [2 x i32]. The elements in this vector are converted +/// and written to the corresponding low-order elements in the destination. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// converted value of the second operand. The upper 64 bits are copied from +/// the upper 64 bits of the first operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpi32_ps(__m128 __a, __m64 __b) +{ + return __builtin_ia32_cvtpi2ps((__v4sf)__a, (__v2si)__b); +} + +/// Converts two elements of a 64-bit vector of [2 x i32] into two +/// floating point values and writes them to the lower 64-bits of the +/// destination. The remaining higher order elements of the destination are +/// copied from the corresponding elements in the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. +/// \param __b +/// A 64-bit vector of [2 x i32]. The elements in this vector are converted +/// and written to the corresponding low-order elements in the destination. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// converted value from the second operand. 
The upper 64 bits are copied +/// from the upper 64 bits of the first operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvt_pi2ps(__m128 __a, __m64 __b) +{ + return _mm_cvtpi32_ps(__a, __b); +} + +/// Extracts a float value contained in the lower 32 bits of a vector of +/// [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are +/// used in the extraction. +/// \returns A 32-bit float containing the extracted value. +static __inline__ float __DEFAULT_FN_ATTRS +_mm_cvtss_f32(__m128 __a) +{ + return __a[0]; +} + +/// Loads two packed float values from the address \a __p into the +/// high-order bits of a 128-bit vector of [4 x float]. The low-order bits +/// are copied from the low-order bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVHPD / MOVHPD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. Bits [63:0] are written to bits [63:0] +/// of the destination. +/// \param __p +/// A pointer to two packed float values. Bits [63:0] are written to bits +/// [127:64] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the moved values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_loadh_pi(__m128 __a, const __m64 *__p) +{ + typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8))); + struct __mm_loadh_pi_struct { + __mm_loadh_pi_v2f32 __u; + } __attribute__((__packed__, __may_alias__)); + __mm_loadh_pi_v2f32 __b = ((const struct __mm_loadh_pi_struct*)__p)->__u; + __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1); + return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5); +} + +/// Loads two packed float values from the address \a __p into the +/// low-order bits of a 128-bit vector of [4 x float]. The high-order bits +/// are copied from the high-order bits of the first operand. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPD / MOVLPD instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. Bits [127:64] are written to bits +/// [127:64] of the destination. +/// \param __p +/// A pointer to two packed float values. Bits [63:0] are written to bits +/// [63:0] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the moved values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_loadl_pi(__m128 __a, const __m64 *__p) +{ + typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8))); + struct __mm_loadl_pi_struct { + __mm_loadl_pi_v2f32 __u; + } __attribute__((__packed__, __may_alias__)); + __mm_loadl_pi_v2f32 __b = ((const struct __mm_loadl_pi_struct*)__p)->__u; + __m128 __bb = __builtin_shufflevector(__b, __b, 0, 1, 0, 1); + return __builtin_shufflevector(__a, __bb, 4, 5, 2, 3); +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 32 bits of the vector are initialized with the single-precision +/// floating-point value loaded from a specified memory location. The upper +/// 96 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location containing a single-precision +/// floating-point value. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. The +/// lower 32 bits contain the value loaded from the memory location. The +/// upper 96 bits are set to zero. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_load_ss(const float *__p) +{ + struct __mm_load_ss_struct { + float __u; + } __attribute__((__packed__, __may_alias__)); + float __u = ((const struct __mm_load_ss_struct*)__p)->__u; + return __extension__ (__m128){ __u, 0, 0, 0 }; +} + +/// Loads a 32-bit float value and duplicates it to all four vector +/// elements of a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBROADCASTSS / MOVSS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a float value to be loaded and duplicated. +/// \returns A 128-bit vector of [4 x float] containing the loaded and +/// duplicated values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_load1_ps(const float *__p) +{ + struct __mm_load1_ps_struct { + float __u; + } __attribute__((__packed__, __may_alias__)); + float __u = ((const struct __mm_load1_ps_struct*)__p)->__u; + return __extension__ (__m128){ __u, __u, __u, __u }; +} + +#define _mm_load_ps1(p) _mm_load1_ps(p) + +/// Loads a 128-bit floating-point vector of [4 x float] from an aligned +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 128-bit aligned. +/// \returns A 128-bit vector of [4 x float] containing the loaded values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_load_ps(const float *__p) +{ + return *(const __m128*)__p; +} + +/// Loads a 128-bit floating-point vector of [4 x float] from an +/// unaligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \returns A 128-bit vector of [4 x float] containing the loaded values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_loadu_ps(const float *__p) +{ + struct __loadu_ps { + __m128_u __v; + } __attribute__((__packed__, __may_alias__)); + return ((const struct __loadu_ps*)__p)->__v; +} + +/// Loads four packed float values, in reverse order, from an aligned +/// memory location to 32-bit elements in a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 128-bit aligned. +/// \returns A 128-bit vector of [4 x float] containing the moved values, loaded +/// in reverse order. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_loadr_ps(const float *__p) +{ + __m128 __a = _mm_load_ps(__p); + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0); +} + +/// Create a 128-bit vector of [4 x float] with undefined values. +/// +/// \headerfile +/// +/// This intrinsic has no corresponding instruction. +/// +/// \returns A 128-bit vector of [4 x float] containing undefined values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_undefined_ps(void) +{ + return (__m128)__builtin_ia32_undef128(); +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 32 bits of the vector are initialized with the specified single-precision +/// floating-point value. The upper 96 bits are set to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. 
+/// +/// \param __w +/// A single-precision floating-point value used to initialize the lower 32 +/// bits of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. The +/// lower 32 bits contain the value provided in the source operand. The +/// upper 96 bits are set to zero. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_set_ss(float __w) +{ + return __extension__ (__m128){ __w, 0, 0, 0 }; +} + +/// Constructs a 128-bit floating-point vector of [4 x float], with each +/// of the four single-precision floating-point vector elements set to the +/// specified single-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS / PERMILPS instruction. +/// +/// \param __w +/// A single-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_set1_ps(float __w) +{ + return __extension__ (__m128){ __w, __w, __w, __w }; +} + +/* Microsoft specific. */ +/// Constructs a 128-bit floating-point vector of [4 x float], with each +/// of the four single-precision floating-point vector elements set to the +/// specified single-precision floating-point value. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPERMILPS / PERMILPS instruction. +/// +/// \param __w +/// A single-precision floating-point value used to initialize each vector +/// element of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_set_ps1(float __w) +{ + return _mm_set1_ps(__w); +} + +/// Constructs a 128-bit floating-point vector of [4 x float] +/// initialized with the specified single-precision floating-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __z +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \param __y +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __x +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __w +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_set_ps(float __z, float __y, float __x, float __w) +{ + return __extension__ (__m128){ __w, __x, __y, __z }; +} + +/// Constructs a 128-bit floating-point vector of [4 x float], +/// initialized in reverse order with the specified 32-bit single-precision +/// float-point values. +/// +/// \headerfile +/// +/// This intrinsic is a utility function and does not correspond to a specific +/// instruction. +/// +/// \param __z +/// A single-precision floating-point value used to initialize bits [31:0] +/// of the result. +/// \param __y +/// A single-precision floating-point value used to initialize bits [63:32] +/// of the result. +/// \param __x +/// A single-precision floating-point value used to initialize bits [95:64] +/// of the result. +/// \param __w +/// A single-precision floating-point value used to initialize bits [127:96] +/// of the result. +/// \returns An initialized 128-bit floating-point vector of [4 x float]. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_setr_ps(float __z, float __y, float __x, float __w) +{ + return __extension__ (__m128){ __z, __y, __x, __w }; +} + +/// Constructs a 128-bit floating-point vector of [4 x float] initialized +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VXORPS / XORPS instruction. +/// +/// \returns An initialized 128-bit floating-point vector of [4 x float] with +/// all elements set to zero. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_setzero_ps(void) +{ + return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f }; +} + +/// Stores the upper 64 bits of a 128-bit vector of [4 x float] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VPEXTRQ / PEXTRQ instruction. +/// +/// \param __p +/// A pointer to a 64-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storeh_pi(__m64 *__p, __m128 __a) +{ + typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8))); + struct __mm_storeh_pi_struct { + __mm_storeh_pi_v2f32 __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 2, 3); +} + +/// Stores the lower 64 bits of a 128-bit vector of [4 x float] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVLPS / MOVLPS instruction. +/// +/// \param __p +/// A pointer to a memory location that will receive the float values. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storel_pi(__m64 *__p, __m128 __a) +{ + typedef float __mm_storeh_pi_v2f32 __attribute__((__vector_size__(8))); + struct __mm_storeh_pi_struct { + __mm_storeh_pi_v2f32 __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_storeh_pi_struct*)__p)->__u = __builtin_shufflevector(__a, __a, 0, 1); +} + +/// Stores the lower 32 bits of a 128-bit vector of [4 x float] to a +/// memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVSS / MOVSS instruction. +/// +/// \param __p +/// A pointer to a 32-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_ss(float *__p, __m128 __a) +{ + struct __mm_store_ss_struct { + float __u; + } __attribute__((__packed__, __may_alias__)); + ((struct __mm_store_ss_struct*)__p)->__u = __a[0]; +} + +/// Stores a 128-bit vector of [4 x float] to an unaligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVUPS / MOVUPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location does not have to be aligned. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storeu_ps(float *__p, __m128 __a) +{ + struct __storeu_ps { + __m128_u __v; + } __attribute__((__packed__, __may_alias__)); + ((struct __storeu_ps*)__p)->__v = __a; +} + +/// Stores a 128-bit vector of [4 x float] into an aligned memory +/// location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 16-byte aligned. 
+/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_ps(float *__p, __m128 __a) +{ + *(__m128*)__p = __a; +} + +/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into +/// four contiguous elements in an aligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each +/// of the four contiguous elements pointed by \a __p. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store1_ps(float *__p, __m128 __a) +{ + __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 0, 0, 0); + _mm_store_ps(__p, __a); +} + +/// Stores the lower 32 bits of a 128-bit vector of [4 x float] into +/// four contiguous elements in an aligned memory location. +/// +/// \headerfile +/// +/// This intrinsic corresponds to VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. +/// \param __a +/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each +/// of the four contiguous elements pointed by \a __p. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_store_ps1(float *__p, __m128 __a) +{ + _mm_store1_ps(__p, __a); +} + +/// Stores float values from a 128-bit vector of [4 x float] to an +/// aligned memory location in reverse order. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVAPS / MOVAPS + shuffling +/// instruction. +/// +/// \param __p +/// A pointer to a 128-bit memory location. The address of the memory +/// location has to be 128-bit aligned. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be stored. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_storer_ps(float *__p, __m128 __a) +{ + __a = __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 3, 2, 1, 0); + _mm_store_ps(__p, __a); +} + +#define _MM_HINT_ET0 7 +#define _MM_HINT_ET1 6 +#define _MM_HINT_T0 3 +#define _MM_HINT_T1 2 +#define _MM_HINT_T2 1 +#define _MM_HINT_NTA 0 + +#ifndef _MSC_VER +/* FIXME: We have to #define this because "sel" must be a constant integer, and + Sema doesn't do any form of constant propagation yet. */ + +/// Loads one cache line of data from the specified address to a location +/// closer to the processor. +/// +/// \headerfile +/// +/// \code +/// void _mm_prefetch(const void *a, const int sel); +/// \endcode +/// +/// This intrinsic corresponds to the PREFETCHNTA instruction. +/// +/// \param a +/// A pointer to a memory location containing a cache line of data. +/// \param sel +/// A predefined integer constant specifying the type of prefetch +/// operation: \n +/// _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint. The +/// PREFETCHNTA instruction will be generated. \n +/// _MM_HINT_T0: Move data using the T0 hint. The PREFETCHT0 instruction will +/// be generated. \n +/// _MM_HINT_T1: Move data using the T1 hint. The PREFETCHT1 instruction will +/// be generated. \n +/// _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will +/// be generated. +#define _mm_prefetch(a, sel) (__builtin_prefetch((const void *)(a), \ + ((sel) >> 2) & 1, (sel) & 0x3)) +#endif + +/// Stores a 64-bit integer in the specified aligned memory location. To +/// minimize caching, the data is flagged as non-temporal (unlikely to be +/// used again soon). 
+/// +/// \headerfile +/// +/// This intrinsic corresponds to the MOVNTQ instruction. +/// +/// \param __p +/// A pointer to an aligned memory location used to store the register value. +/// \param __a +/// A 64-bit integer containing the value to be stored. +static __inline__ void __DEFAULT_FN_ATTRS_MMX +_mm_stream_pi(void *__p, __m64 __a) +{ + __builtin_ia32_movntq((__m64 *)__p, __a); +} + +/// Moves packed float values from a 128-bit vector of [4 x float] to a +/// 128-bit aligned memory location. To minimize caching, the data is flagged +/// as non-temporal (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVNTPS / MOVNTPS instruction. +/// +/// \param __p +/// A pointer to a 128-bit aligned memory location that will receive the +/// single-precision floating-point values. +/// \param __a +/// A 128-bit vector of [4 x float] containing the values to be moved. +static __inline__ void __DEFAULT_FN_ATTRS +_mm_stream_ps(void *__p, __m128 __a) +{ + __builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p); +} + +#if defined(__cplusplus) +extern "C" { +#endif + +/// Forces strong memory ordering (serialization) between store +/// instructions preceding this instruction and store instructions following +/// this instruction, ensuring the system completes all previous stores +/// before executing subsequent stores. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the SFENCE instruction. +/// +void _mm_sfence(void); + +#if defined(__cplusplus) +} // extern "C" +#endif + +/// Extracts 16-bit element from a 64-bit vector of [4 x i16] and +/// returns it, as specified by the immediate integer operand. +/// +/// \headerfile +/// +/// \code +/// int _mm_extract_pi16(__m64 a, int n); +/// \endcode +/// +/// This intrinsic corresponds to the VPEXTRW / PEXTRW instruction. +/// +/// \param a +/// A 64-bit vector of [4 x i16]. +/// \param n +/// An immediate integer operand that determines which bits are extracted: \n +/// 0: Bits [15:0] are copied to the destination. \n +/// 1: Bits [31:16] are copied to the destination. \n +/// 2: Bits [47:32] are copied to the destination. \n +/// 3: Bits [63:48] are copied to the destination. +/// \returns A 16-bit integer containing the extracted 16 bits of packed data. +#define _mm_extract_pi16(a, n) \ + ((int)__builtin_ia32_vec_ext_v4hi((__v4hi)a, (int)n)) + +/// Copies data from the 64-bit vector of [4 x i16] to the destination, +/// and inserts the lower 16-bits of an integer operand at the 16-bit offset +/// specified by the immediate operand \a n. +/// +/// \headerfile +/// +/// \code +/// __m64 _mm_insert_pi16(__m64 a, int d, int n); +/// \endcode +/// +/// This intrinsic corresponds to the PINSRW instruction. +/// +/// \param a +/// A 64-bit vector of [4 x i16]. +/// \param d +/// An integer. The lower 16-bit value from this operand is written to the +/// destination at the offset specified by operand \a n. +/// \param n +/// An immediate integer operant that determines which the bits to be used +/// in the destination. \n +/// 0: Bits [15:0] are copied to the destination. \n +/// 1: Bits [31:16] are copied to the destination. \n +/// 2: Bits [47:32] are copied to the destination. \n +/// 3: Bits [63:48] are copied to the destination. \n +/// The remaining bits in the destination are copied from the corresponding +/// bits in operand \a a. +/// \returns A 64-bit integer vector containing the copied packed data from the +/// operands. 
+#define _mm_insert_pi16(a, d, n) \ + ((__m64)__builtin_ia32_vec_set_v4hi((__v4hi)a, (int)d, (int)n)) + +/// Compares each of the corresponding packed 16-bit integer values of +/// the 64-bit integer vectors, and writes the greater value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMAXSW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_max_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pmaxsw((__v4hi)__a, (__v4hi)__b); +} + +/// Compares each of the corresponding packed 8-bit unsigned integer +/// values of the 64-bit integer vectors, and writes the greater value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMAXUB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_max_pu8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pmaxub((__v8qi)__a, (__v8qi)__b); +} + +/// Compares each of the corresponding packed 16-bit integer values of +/// the 64-bit integer vectors, and writes the lesser value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMINSW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_min_pi16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pminsw((__v4hi)__a, (__v4hi)__b); +} + +/// Compares each of the corresponding packed 8-bit unsigned integer +/// values of the 64-bit integer vectors, and writes the lesser value to the +/// corresponding bits in the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMINUB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the comparison results. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_min_pu8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pminub((__v8qi)__a, (__v8qi)__b); +} + +/// Takes the most significant bit from each 8-bit element in a 64-bit +/// integer vector to create an 8-bit mask value. Zero-extends the value to +/// 32-bit integer and writes it to the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMOVMSKB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing the values with bits to be extracted. +/// \returns The most significant bit from each 8-bit element in \a __a, +/// written to bits [7:0]. 
+static __inline__ int __DEFAULT_FN_ATTRS_MMX +_mm_movemask_pi8(__m64 __a) +{ + return __builtin_ia32_pmovmskb((__v8qi)__a); +} + +/// Multiplies packed 16-bit unsigned integer values and writes the +/// high-order 16 bits of each 32-bit product to the corresponding bits in +/// the destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PMULHUW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the products of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_mulhi_pu16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pmulhuw((__v4hi)__a, (__v4hi)__b); +} + +/// Shuffles the 4 16-bit integers from a 64-bit integer vector to the +/// destination, as specified by the immediate value operand. +/// +/// \headerfile +/// +/// \code +/// __m64 _mm_shuffle_pi16(__m64 a, const int n); +/// \endcode +/// +/// This intrinsic corresponds to the PSHUFW instruction. +/// +/// \param a +/// A 64-bit integer vector containing the values to be shuffled. +/// \param n +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from \a a. The destinations within the 64-bit destination are +/// assigned values as follows: \n +/// Bits [1:0] are used to assign values to bits [15:0] in the +/// destination. \n +/// Bits [3:2] are used to assign values to bits [31:16] in the +/// destination. \n +/// Bits [5:4] are used to assign values to bits [47:32] in the +/// destination. \n +/// Bits [7:6] are used to assign values to bits [63:48] in the +/// destination. \n +/// Bit value assignments: \n +/// 00: assigned from bits [15:0] of \a a. \n +/// 01: assigned from bits [31:16] of \a a. \n +/// 10: assigned from bits [47:32] of \a a. \n +/// 11: assigned from bits [63:48] of \a a. \n +/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro. +/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form +/// [b6, b4, b2, b0]. +/// \returns A 64-bit integer vector containing the shuffled values. +#define _mm_shuffle_pi16(a, n) \ + ((__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n))) + +/// Conditionally copies the values from each 8-bit element in the first +/// 64-bit integer vector operand to the specified memory location, as +/// specified by the most significant bit in the corresponding element in the +/// second 64-bit integer vector operand. +/// +/// To minimize caching, the data is flagged as non-temporal +/// (unlikely to be used again soon). +/// +/// \headerfile +/// +/// This intrinsic corresponds to the MASKMOVQ instruction. +/// +/// \param __d +/// A 64-bit integer vector containing the values with elements to be copied. +/// \param __n +/// A 64-bit integer vector operand. The most significant bit from each 8-bit +/// element determines whether the corresponding element in operand \a __d +/// is copied. If the most significant bit of a given element is 1, the +/// corresponding element in operand \a __d is copied. +/// \param __p +/// A pointer to a 64-bit memory location that will receive the conditionally +/// copied integer values. The address of the memory location does not have +/// to be aligned. 
+static __inline__ void __DEFAULT_FN_ATTRS_MMX +_mm_maskmove_si64(__m64 __d, __m64 __n, char *__p) +{ + __builtin_ia32_maskmovq((__v8qi)__d, (__v8qi)__n, __p); +} + +/// Computes the rounded averages of the packed unsigned 8-bit integer +/// values and writes the averages to the corresponding bits in the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PAVGB instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the averages of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_avg_pu8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pavgb((__v8qi)__a, (__v8qi)__b); +} + +/// Computes the rounded averages of the packed unsigned 16-bit integer +/// values and writes the averages to the corresponding bits in the +/// destination. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PAVGW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector containing the averages of both operands. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_avg_pu16(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_pavgw((__v4hi)__a, (__v4hi)__b); +} + +/// Subtracts the corresponding 8-bit unsigned integer values of the two +/// 64-bit vector operands and computes the absolute value for each of the +/// difference. Then sum of the 8 absolute differences is written to the +/// bits [15:0] of the destination; the remaining bits [63:16] are cleared. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the PSADBW instruction. +/// +/// \param __a +/// A 64-bit integer vector containing one of the source operands. +/// \param __b +/// A 64-bit integer vector containing one of the source operands. +/// \returns A 64-bit integer vector whose lower 16 bits contain the sums of the +/// sets of absolute differences between both operands. The upper bits are +/// cleared. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_sad_pu8(__m64 __a, __m64 __b) +{ + return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b); +} + +#if defined(__cplusplus) +extern "C" { +#endif + +/// Returns the contents of the MXCSR register as a 32-bit unsigned +/// integer value. +/// +/// There are several groups of macros associated with this +/// intrinsic, including: +///
    +///
+/// - For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+/// _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+/// _MM_EXCEPT_INEXACT. There is a convenience wrapper
+/// _MM_GET_EXCEPTION_STATE().
+/// - For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+/// _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+/// There is a convenience wrapper _MM_GET_EXCEPTION_MASK().
+/// - For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+/// _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+/// _MM_GET_ROUNDING_MODE().
+/// - For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+/// There is a convenience wrapper _MM_GET_FLUSH_ZERO_MODE().
+/// - For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+/// _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+/// _MM_GET_DENORMALS_ZERO_MODE().
+/// +/// For example, the following expression checks if an overflow exception has +/// occurred: +/// \code +/// ( _mm_getcsr() & _MM_EXCEPT_OVERFLOW ) +/// \endcode +/// +/// The following expression gets the current rounding mode: +/// \code +/// _MM_GET_ROUNDING_MODE() +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VSTMXCSR / STMXCSR instruction. +/// +/// \returns A 32-bit unsigned integer containing the contents of the MXCSR +/// register. +unsigned int _mm_getcsr(void); + +/// Sets the MXCSR register with the 32-bit unsigned integer value. +/// +/// There are several groups of macros associated with this intrinsic, +/// including: +///
    +///
+/// - For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+/// _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
+/// _MM_EXCEPT_INEXACT. There is a convenience wrapper
+/// _MM_SET_EXCEPTION_STATE(x) where x is one of these macros.
+/// - For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+/// _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
+/// There is a convenience wrapper _MM_SET_EXCEPTION_MASK(x) where x is one
+/// of these macros.
+/// - For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+/// _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
+/// _MM_SET_ROUNDING_MODE(x) where x is one of these macros.
+/// - For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+/// There is a convenience wrapper _MM_SET_FLUSH_ZERO_MODE(x) where x is
+/// one of these macros.
+/// - For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+/// _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
+/// _MM_SET_DENORMALS_ZERO_MODE(x) where x is one of these macros.
+/// +/// For example, the following expression causes subsequent floating-point +/// operations to round up: +/// _mm_setcsr(_mm_getcsr() | _MM_ROUND_UP) +/// +/// The following example sets the DAZ and FTZ flags: +/// \code +/// void setFlags() { +/// _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); +/// _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); +/// } +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VLDMXCSR / LDMXCSR instruction. +/// +/// \param __i +/// A 32-bit unsigned integer value to be written to the MXCSR register. +void _mm_setcsr(unsigned int __i); + +#if defined(__cplusplus) +} // extern "C" +#endif + +/// Selects 4 float values from the 128-bit operands of [4 x float], as +/// specified by the immediate value operand. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_shuffle_ps(__m128 a, __m128 b, const int mask); +/// \endcode +/// +/// This intrinsic corresponds to the VSHUFPS / SHUFPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. +/// \param mask +/// An immediate value containing an 8-bit value specifying which elements to +/// copy from \a a and \a b. \n +/// Bits [3:0] specify the values copied from operand \a a. \n +/// Bits [7:4] specify the values copied from operand \a b. \n +/// The destinations within the 128-bit destination are assigned values as +/// follows: \n +/// Bits [1:0] are used to assign values to bits [31:0] in the +/// destination. \n +/// Bits [3:2] are used to assign values to bits [63:32] in the +/// destination. \n +/// Bits [5:4] are used to assign values to bits [95:64] in the +/// destination. \n +/// Bits [7:6] are used to assign values to bits [127:96] in the +/// destination. \n +/// Bit value assignments: \n +/// 00: Bits [31:0] copied from the specified operand. \n +/// 01: Bits [63:32] copied from the specified operand. \n +/// 10: Bits [95:64] copied from the specified operand. \n +/// 11: Bits [127:96] copied from the specified operand. \n +/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro. +/// _MM_SHUFFLE(b6, b4, b2, b0) can create an 8-bit mask of the form +/// [b6, b4, b2, b0]. +/// \returns A 128-bit vector of [4 x float] containing the shuffled values. +#define _mm_shuffle_ps(a, b, mask) \ + ((__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \ + (int)(mask))) + +/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of +/// [4 x float] and interleaves them into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPS / UNPCKHPS instruction. +/// +/// \param __a +/// A 128-bit vector of [4 x float]. \n +/// Bits [95:64] are written to bits [31:0] of the destination. \n +/// Bits [127:96] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x float]. +/// Bits [95:64] are written to bits [63:32] of the destination. \n +/// Bits [127:96] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the interleaved values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_unpackhi_ps(__m128 __a, __m128 __b) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 2, 6, 3, 7); +} + +/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of +/// [4 x float] and interleaves them into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPS / UNPCKLPS instruction. 
+/// +/// \param __a +/// A 128-bit vector of [4 x float]. \n +/// Bits [31:0] are written to bits [31:0] of the destination. \n +/// Bits [63:32] are written to bits [95:64] of the destination. +/// \param __b +/// A 128-bit vector of [4 x float]. \n +/// Bits [31:0] are written to bits [63:32] of the destination. \n +/// Bits [63:32] are written to bits [127:96] of the destination. +/// \returns A 128-bit vector of [4 x float] containing the interleaved values. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_unpacklo_ps(__m128 __a, __m128 __b) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 4, 1, 5); +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 32 bits are set to the lower 32 bits of the second parameter. The upper +/// 96 bits are set to the upper 96 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VBLENDPS / BLENDPS / MOVSS +/// instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. The upper 96 bits are +/// written to the upper 96 bits of the result. +/// \param __b +/// A 128-bit floating-point vector of [4 x float]. The lower 32 bits are +/// written to the lower 32 bits of the result. +/// \returns A 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_move_ss(__m128 __a, __m128 __b) +{ + __a[0] = __b[0]; + return __a; +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 64 bits are set to the upper 64 bits of the second parameter. The upper +/// 64 bits are set to the upper 64 bits of the first parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKHPD / UNPCKHPD instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are +/// written to the upper 64 bits of the result. +/// \param __b +/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are +/// written to the lower 64 bits of the result. +/// \returns A 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_movehl_ps(__m128 __a, __m128 __b) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 6, 7, 2, 3); +} + +/// Constructs a 128-bit floating-point vector of [4 x float]. The lower +/// 64 bits are set to the lower 64 bits of the first parameter. The upper +/// 64 bits are set to the lower 64 bits of the second parameter. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VUNPCKLPD / UNPCKLPD instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are +/// written to the lower 64 bits of the result. +/// \param __b +/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are +/// written to the upper 64 bits of the result. +/// \returns A 128-bit floating-point vector of [4 x float]. +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_movelh_ps(__m128 __a, __m128 __b) +{ + return __builtin_shufflevector((__v4sf)__a, (__v4sf)__b, 0, 1, 4, 5); +} + +/// Converts a 64-bit vector of [4 x i16] into a 128-bit vector of [4 x +/// float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of [4 x i16]. The elements of the destination are copied +/// from the corresponding elements in this operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the operand. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpi16_ps(__m64 __a) +{ + __m64 __b, __c; + __m128 __r; + + __b = _mm_setzero_si64(); + __b = _mm_cmpgt_pi16(__b, __a); + __c = _mm_unpackhi_pi16(__a, __b); + __r = _mm_setzero_ps(); + __r = _mm_cvtpi32_ps(__r, __c); + __r = _mm_movelh_ps(__r, __r); + __c = _mm_unpacklo_pi16(__a, __b); + __r = _mm_cvtpi32_ps(__r, __c); + + return __r; +} + +/// Converts a 64-bit vector of 16-bit unsigned integer values into a +/// 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of 16-bit unsigned integer values. The elements of the +/// destination are copied from the corresponding elements in this operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpu16_ps(__m64 __a) +{ + __m64 __b, __c; + __m128 __r; + + __b = _mm_setzero_si64(); + __c = _mm_unpackhi_pi16(__a, __b); + __r = _mm_setzero_ps(); + __r = _mm_cvtpi32_ps(__r, __c); + __r = _mm_movelh_ps(__r, __r); + __c = _mm_unpacklo_pi16(__a, __b); + __r = _mm_cvtpi32_ps(__r, __c); + + return __r; +} + +/// Converts the lower four 8-bit values from a 64-bit vector of [8 x i8] +/// into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of [8 x i8]. The elements of the destination are copied +/// from the corresponding lower 4 elements in this operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpi8_ps(__m64 __a) +{ + __m64 __b; + + __b = _mm_setzero_si64(); + __b = _mm_cmpgt_pi8(__b, __a); + __b = _mm_unpacklo_pi8(__a, __b); + + return _mm_cvtpi16_ps(__b); +} + +/// Converts the lower four unsigned 8-bit integer values from a 64-bit +/// vector of [8 x u8] into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of unsigned 8-bit integer values. The elements of the +/// destination are copied from the corresponding lower 4 elements in this +/// operand. +/// \returns A 128-bit vector of [4 x float] containing the copied and converted +/// values from the source operand. +static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpu8_ps(__m64 __a) +{ + __m64 __b; + + __b = _mm_setzero_si64(); + __b = _mm_unpacklo_pi8(__a, __b); + + return _mm_cvtpi16_ps(__b); +} + +/// Converts the two 32-bit signed integer values from each 64-bit vector +/// operand of [2 x i32] into a 128-bit vector of [4 x float]. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. +/// +/// \param __a +/// A 64-bit vector of [2 x i32]. The lower elements of the destination are +/// copied from the elements in this operand. +/// \param __b +/// A 64-bit vector of [2 x i32]. The upper elements of the destination are +/// copied from the elements in this operand. +/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the +/// copied and converted values from the first operand. The upper 64 bits +/// contain the copied and converted values from the second operand. 
+static __inline__ __m128 __DEFAULT_FN_ATTRS_MMX +_mm_cvtpi32x2_ps(__m64 __a, __m64 __b) +{ + __m128 __c; + + __c = _mm_setzero_ps(); + __c = _mm_cvtpi32_ps(__c, __b); + __c = _mm_movelh_ps(__c, __c); + + return _mm_cvtpi32_ps(__c, __a); +} + +/// Converts each single-precision floating-point element of a 128-bit +/// floating-point vector of [4 x float] into a 16-bit signed integer, and +/// packs the results into a 64-bit integer vector of [4 x i16]. +/// +/// If the floating-point element is NaN or infinity, or if the +/// floating-point element is greater than 0x7FFFFFFF or less than -0x8000, +/// it is converted to 0x8000. Otherwise if the floating-point element is +/// greater than 0x7FFF, it is converted to 0x7FFF. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI + COMPOSITE instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 64-bit integer vector of [4 x i16] containing the converted +/// values. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvtps_pi16(__m128 __a) +{ + __m64 __b, __c; + + __b = _mm_cvtps_pi32(__a); + __a = _mm_movehl_ps(__a, __a); + __c = _mm_cvtps_pi32(__a); + + return _mm_packs_pi32(__b, __c); +} + +/// Converts each single-precision floating-point element of a 128-bit +/// floating-point vector of [4 x float] into an 8-bit signed integer, and +/// packs the results into the lower 32 bits of a 64-bit integer vector of +/// [8 x i8]. The upper 32 bits of the vector are set to 0. +/// +/// If the floating-point element is NaN or infinity, or if the +/// floating-point element is greater than 0x7FFFFFFF or less than -0x80, it +/// is converted to 0x80. Otherwise if the floating-point element is greater +/// than 0x7F, it is converted to 0x7F. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the CVTPS2PI + COMPOSITE instruction. +/// +/// \param __a +/// 128-bit floating-point vector of [4 x float]. +/// \returns A 64-bit integer vector of [8 x i8]. The lower 32 bits contain the +/// converted values and the uppper 32 bits are set to zero. +static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX +_mm_cvtps_pi8(__m128 __a) +{ + __m64 __b, __c; + + __b = _mm_cvtps_pi16(__a); + __c = _mm_setzero_si64(); + + return _mm_packs_pi16(__b, __c); +} + +/// Extracts the sign bits from each single-precision floating-point +/// element of a 128-bit floating-point vector of [4 x float] and returns the +/// sign bits in bits [0:3] of the result. Bits [31:4] of the result are set +/// to zero. +/// +/// \headerfile +/// +/// This intrinsic corresponds to the VMOVMSKPS / MOVMSKPS instruction. +/// +/// \param __a +/// A 128-bit floating-point vector of [4 x float]. +/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each +/// single-precision floating-point element of the parameter. Bits [31:4] are +/// set to zero. 
+static __inline__ int __DEFAULT_FN_ATTRS +_mm_movemask_ps(__m128 __a) +{ + return __builtin_ia32_movmskps((__v4sf)__a); +} + +/* Compare */ +#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */ +#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */ +#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */ +#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */ +#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */ +#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */ +#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */ +#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */ + +/// Compares each of the corresponding values of two 128-bit vectors of +/// [4 x float], using the operation specified by the immediate integer +/// operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +#define _mm_cmp_ps(a, b, c) \ + ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c))) + +/// Compares each of the corresponding scalar values of two 128-bit +/// vectors of [4 x float], using the operation specified by the immediate +/// integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPSS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
+#define _mm_cmp_ss(a, b, c) \ + ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c))) + +#define _MM_ALIGN16 __attribute__((aligned(16))) + +#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) + +#define _MM_EXCEPT_INVALID (0x0001U) +#define _MM_EXCEPT_DENORM (0x0002U) +#define _MM_EXCEPT_DIV_ZERO (0x0004U) +#define _MM_EXCEPT_OVERFLOW (0x0008U) +#define _MM_EXCEPT_UNDERFLOW (0x0010U) +#define _MM_EXCEPT_INEXACT (0x0020U) +#define _MM_EXCEPT_MASK (0x003fU) + +#define _MM_MASK_INVALID (0x0080U) +#define _MM_MASK_DENORM (0x0100U) +#define _MM_MASK_DIV_ZERO (0x0200U) +#define _MM_MASK_OVERFLOW (0x0400U) +#define _MM_MASK_UNDERFLOW (0x0800U) +#define _MM_MASK_INEXACT (0x1000U) +#define _MM_MASK_MASK (0x1f80U) + +#define _MM_ROUND_NEAREST (0x0000U) +#define _MM_ROUND_DOWN (0x2000U) +#define _MM_ROUND_UP (0x4000U) +#define _MM_ROUND_TOWARD_ZERO (0x6000U) +#define _MM_ROUND_MASK (0x6000U) + +#define _MM_FLUSH_ZERO_MASK (0x8000U) +#define _MM_FLUSH_ZERO_ON (0x8000U) +#define _MM_FLUSH_ZERO_OFF (0x0000U) + +#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK) +#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK) +#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK) +#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK) + +#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x))) +#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x))) +#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x))) +#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x))) + +#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \ +do { \ + __m128 tmp3, tmp2, tmp1, tmp0; \ + tmp0 = _mm_unpacklo_ps((row0), (row1)); \ + tmp2 = _mm_unpacklo_ps((row2), (row3)); \ + tmp1 = _mm_unpackhi_ps((row0), (row1)); \ + tmp3 = _mm_unpackhi_ps((row2), (row3)); \ + (row0) = _mm_movelh_ps(tmp0, tmp2); \ + (row1) = _mm_movehl_ps(tmp2, tmp0); \ + (row2) = _mm_movelh_ps(tmp1, tmp3); \ + (row3) = _mm_movehl_ps(tmp3, tmp1); \ +} while (0) + +/* Aliases for compatibility. */ +#define _m_pextrw _mm_extract_pi16 +#define _m_pinsrw _mm_insert_pi16 +#define _m_pmaxsw _mm_max_pi16 +#define _m_pmaxub _mm_max_pu8 +#define _m_pminsw _mm_min_pi16 +#define _m_pminub _mm_min_pu8 +#define _m_pmovmskb _mm_movemask_pi8 +#define _m_pmulhuw _mm_mulhi_pu16 +#define _m_pshufw _mm_shuffle_pi16 +#define _m_maskmovq _mm_maskmove_si64 +#define _m_pavgb _mm_avg_pu8 +#define _m_pavgw _mm_avg_pu16 +#define _m_psadbw _mm_sad_pu8 +#define _m_ _mm_ + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS_MMX + +/* Ugly hack for backwards-compatibility (compatible with gcc) */ +#if defined(__SSE2__) && !__building_module(_Builtin_intrinsics) +#include "emmintrin.h" +#endif + +#endif /* __XMMINTRIN_H */ diff --git a/third_party/intel/clang/xopintrin.h b/third_party/intel/clang/xopintrin.h new file mode 100644 index 000000000..976cdf490 --- /dev/null +++ b/third_party/intel/clang/xopintrin.h @@ -0,0 +1,770 @@ +/*===---- xopintrin.h - XOP intrinsics -------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __X86INTRIN_H +#error "Never use directly; include instead." 
+#endif + +#ifndef __XOPINTRIN_H +#define __XOPINTRIN_H + +#include + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(128))) +#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("xop"), __min_vector_width__(256))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacssww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_macc_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacsww((__v8hi)__A, (__v8hi)__B, (__v8hi)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maccsd_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maccd_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maccs_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacssdd((__v4si)__A, (__v4si)__B, (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_macc_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacsdd((__v4si)__A, (__v4si)__B, (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maccslo_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacssdql((__v4si)__A, (__v4si)__B, (__v2di)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_macclo_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacsdql((__v4si)__A, (__v4si)__B, (__v2di)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maccshi_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacssdqh((__v4si)__A, (__v4si)__B, (__v2di)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_macchi_epi32(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmacsdqh((__v4si)__A, (__v4si)__B, (__v2di)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maddsd_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmadcsswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_maddd_epi16(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpmadcswd((__v8hi)__A, (__v8hi)__B, (__v4si)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddw_epi8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddbw((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddd_epi8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddbd((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epi8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddbq((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddd_epi16(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddwd((__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epi16(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddwq((__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epi32(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphadddq((__v4si)__A); +} + 
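The signed horizontal adds above widen progressively (bytes to words, doublewords, then quadwords), so they compose into full reductions. A minimal usage sketch, not part of the patch, assuming an XOP-capable CPU, that this header is reached through x86intrin.h, and that the SSE4.1 intrinsic _mm_extract_epi64 is available (compile with e.g. -mxop -msse4.1); the helper name xop_sum_epi8 is invented for illustration:

/* illustrative only: reduce 16 signed bytes to a scalar sum */
static inline long long xop_sum_epi8(__m128i v) {
  /* VPHADDBQ sums each group of 8 adjacent signed bytes into a quadword */
  __m128i q = _mm_haddq_epi8(v);
  /* combine the two partial sums held in the low and high quadwords */
  return _mm_extract_epi64(q, 0) + _mm_extract_epi64(q, 1);
}
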
+static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddw_epu8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddubw((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddd_epu8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddubd((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epu8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddubq((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddd_epu16(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphadduwd((__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epu16(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphadduwq((__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_haddq_epu32(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphaddudq((__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsubw_epi8(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphsubbw((__v16qi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsubd_epi16(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphsubwd((__v8hi)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_hsubq_epi32(__m128i __A) +{ + return (__m128i)__builtin_ia32_vphsubdq((__v4si)__A); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_cmov_si128(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)(((__v2du)__A & (__v2du)__C) | ((__v2du)__B & ~(__v2du)__C)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cmov_si256(__m256i __A, __m256i __B, __m256i __C) +{ + return (__m256i)(((__v4du)__A & (__v4du)__C) | ((__v4du)__B & ~(__v4du)__C)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_perm_epi8(__m128i __A, __m128i __B, __m128i __C) +{ + return (__m128i)__builtin_ia32_vpperm((__v16qi)__A, (__v16qi)__B, (__v16qi)__C); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rot_epi8(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vprotb((__v16qi)__A, (__v16qi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rot_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vprotw((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rot_epi32(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vprotd((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_rot_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vprotq((__v2di)__A, (__v2di)__B); +} + +#define _mm_roti_epi8(A, N) \ + ((__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N))) + +#define _mm_roti_epi16(A, N) \ + ((__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N))) + +#define _mm_roti_epi32(A, N) \ + ((__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N))) + +#define _mm_roti_epi64(A, N) \ + ((__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N))) + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shl_epi8(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshlb((__v16qi)__A, (__v16qi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shl_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshlw((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shl_epi32(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshld((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_shl_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshlq((__v2di)__A, (__v2di)__B); +} + +static __inline__ __m128i 
__DEFAULT_FN_ATTRS +_mm_sha_epi8(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshab((__v16qi)__A, (__v16qi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha_epi16(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshaw((__v8hi)__A, (__v8hi)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha_epi32(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshad((__v4si)__A, (__v4si)__B); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_sha_epi64(__m128i __A, __m128i __B) +{ + return (__m128i)__builtin_ia32_vpshaq((__v2di)__A, (__v2di)__B); +} + +#define _mm_com_epu8(A, B, N) \ + ((__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (N))) + +#define _mm_com_epu16(A, B, N) \ + ((__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (N))) + +#define _mm_com_epu32(A, B, N) \ + ((__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (N))) + +#define _mm_com_epu64(A, B, N) \ + ((__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (N))) + +#define _mm_com_epi8(A, B, N) \ + ((__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \ + (__v16qi)(__m128i)(B), (N))) + +#define _mm_com_epi16(A, B, N) \ + ((__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \ + (__v8hi)(__m128i)(B), (N))) + +#define _mm_com_epi32(A, B, N) \ + ((__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \ + (__v4si)(__m128i)(B), (N))) + +#define _mm_com_epi64(A, B, N) \ + ((__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \ + (__v2di)(__m128i)(B), (N))) + +#define _MM_PCOMCTRL_LT 0 +#define _MM_PCOMCTRL_LE 1 +#define _MM_PCOMCTRL_GT 2 +#define _MM_PCOMCTRL_GE 3 +#define _MM_PCOMCTRL_EQ 4 +#define _MM_PCOMCTRL_NEQ 5 +#define _MM_PCOMCTRL_FALSE 6 +#define _MM_PCOMCTRL_TRUE 7 + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epu8(__m128i __A, __m128i __B) +{ + return _mm_com_epu8(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epu16(__m128i __A, __m128i 
__B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epu16(__m128i __A, __m128i __B) +{ + return _mm_com_epu16(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epu32(__m128i __A, __m128i __B) +{ + return _mm_com_epu32(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epu64(__m128i __A, __m128i __B) +{ + return _mm_com_epu64(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS 
+_mm_comge_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epi8(__m128i __A, __m128i __B) +{ + return _mm_com_epi8(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epi16(__m128i __A, __m128i __B) +{ + return _mm_com_epi16(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comge_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epi32(__m128i __A, __m128i __B) +{ + return _mm_com_epi32(__A, __B, _MM_PCOMCTRL_TRUE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comlt_epi64(__m128i __A, __m128i __B) +{ + return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LT); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comle_epi64(__m128i __A, __m128i __B) +{ + return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_LE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comgt_epi64(__m128i __A, __m128i __B) +{ + return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GT); +} + +static __inline__ 
__m128i __DEFAULT_FN_ATTRS +_mm_comge_epi64(__m128i __A, __m128i __B) +{ + return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_GE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comeq_epi64(__m128i __A, __m128i __B) +{ + return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_EQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comneq_epi64(__m128i __A, __m128i __B) +{ + return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_NEQ); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comfalse_epi64(__m128i __A, __m128i __B) +{ + return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_FALSE); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS +_mm_comtrue_epi64(__m128i __A, __m128i __B) +{ + return _mm_com_epi64(__A, __B, _MM_PCOMCTRL_TRUE); +} + +#define _mm_permute2_pd(X, Y, C, I) \ + ((__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \ + (__v2df)(__m128d)(Y), \ + (__v2di)(__m128i)(C), (I))) + +#define _mm256_permute2_pd(X, Y, C, I) \ + ((__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \ + (__v4df)(__m256d)(Y), \ + (__v4di)(__m256i)(C), (I))) + +#define _mm_permute2_ps(X, Y, C, I) \ + ((__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \ + (__v4si)(__m128i)(C), (I))) + +#define _mm256_permute2_ps(X, Y, C, I) \ + ((__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \ + (__v8sf)(__m256)(Y), \ + (__v8si)(__m256i)(C), (I))) + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_frcz_ss(__m128 __A) +{ + return (__m128)__builtin_ia32_vfrczss((__v4sf)__A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_frcz_sd(__m128d __A) +{ + return (__m128d)__builtin_ia32_vfrczsd((__v2df)__A); +} + +static __inline__ __m128 __DEFAULT_FN_ATTRS +_mm_frcz_ps(__m128 __A) +{ + return (__m128)__builtin_ia32_vfrczps((__v4sf)__A); +} + +static __inline__ __m128d __DEFAULT_FN_ATTRS +_mm_frcz_pd(__m128d __A) +{ + return (__m128d)__builtin_ia32_vfrczpd((__v2df)__A); +} + +static __inline__ __m256 __DEFAULT_FN_ATTRS256 +_mm256_frcz_ps(__m256 __A) +{ + return (__m256)__builtin_ia32_vfrczps256((__v8sf)__A); +} + +static __inline__ __m256d __DEFAULT_FN_ATTRS256 +_mm256_frcz_pd(__m256d __A) +{ + return (__m256d)__builtin_ia32_vfrczpd256((__v4df)__A); +} + +#undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS256 + +#endif /* __XOPINTRIN_H */ diff --git a/third_party/intel/clang/xsavecintrin.h b/third_party/intel/clang/xsavecintrin.h new file mode 100644 index 000000000..1f2d00120 --- /dev/null +++ b/third_party/intel/clang/xsavecintrin.h @@ -0,0 +1,84 @@ +/*===---- xsavecintrin.h - XSAVEC intrinsic --------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XSAVECINTRIN_H +#define __XSAVECINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec"))) + +/// Performs a full or partial save of processor state to the memory at +/// \a __p. The exact state saved depends on the 64-bit mask \a __m and +/// processor control register \c XCR0. 
+/// +/// \code{.operation} +/// mask[62:0] := __m[62:0] AND XCR0[62:0] +/// FOR i := 0 TO 62 +/// IF mask[i] == 1 +/// CASE (i) OF +/// 0: save X87 FPU state +/// 1: save SSE state +/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i] +/// FI +/// ENDFOR +/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c XSAVEC instruction. +/// +/// \param __p +/// Pointer to the save area; must be 64-byte aligned. +/// \param __m +/// A 64-bit mask indicating what state should be saved. +static __inline__ void __DEFAULT_FN_ATTRS +_xsavec(void *__p, unsigned long long __m) { + __builtin_ia32_xsavec(__p, __m); +} + +#ifdef __x86_64__ +/// Performs a full or partial save of processor state to the memory at +/// \a __p. The exact state saved depends on the 64-bit mask \a __m and +/// processor control register \c XCR0. +/// +/// \code{.operation} +/// mask[62:0] := __m[62:0] AND XCR0[62:0] +/// FOR i := 0 TO 62 +/// IF mask[i] == 1 +/// CASE (i) OF +/// 0: save X87 FPU state +/// 1: save SSE state +/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i] +/// FI +/// ENDFOR +/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0]) +/// \endcode +/// +/// \headerfile +/// +/// This intrinsic corresponds to the \c XSAVEC64 instruction. +/// +/// \param __p +/// Pointer to the save area; must be 64-byte aligned. +/// \param __m +/// A 64-bit mask indicating what state should be saved. +static __inline__ void __DEFAULT_FN_ATTRS +_xsavec64(void *__p, unsigned long long __m) { + __builtin_ia32_xsavec64(__p, __m); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/xsaveintrin.h b/third_party/intel/clang/xsaveintrin.h new file mode 100644 index 000000000..9429db6dd --- /dev/null +++ b/third_party/intel/clang/xsaveintrin.h @@ -0,0 +1,63 @@ +/*===---- xsaveintrin.h - XSAVE intrinsic ----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XSAVEINTRIN_H +#define __XSAVEINTRIN_H + +#ifdef _MSC_VER +#define _XCR_XFEATURE_ENABLED_MASK 0 +#endif + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_xsave(void *__p, unsigned long long __m) { + __builtin_ia32_xsave(__p, __m); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xrstor(void *__p, unsigned long long __m) { + __builtin_ia32_xrstor(__p, __m); +} + +#ifndef _MSC_VER +#define _xgetbv(A) __builtin_ia32_xgetbv((long long)(A)) +#define _xsetbv(A, B) __builtin_ia32_xsetbv((unsigned int)(A), (unsigned long long)(B)) +#else +#ifdef __cplusplus +extern "C" { +#endif +unsigned __int64 __cdecl _xgetbv(unsigned int); +void __cdecl _xsetbv(unsigned int, unsigned __int64); +#ifdef __cplusplus +} +#endif +#endif /* _MSC_VER */ + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS +_xsave64(void *__p, unsigned long long __m) { + __builtin_ia32_xsave64(__p, __m); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xrstor64(void *__p, unsigned long long __m) { + __builtin_ia32_xrstor64(__p, __m); +} + +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/xsaveoptintrin.h b/third_party/intel/clang/xsaveoptintrin.h new file mode 100644 index 000000000..89a4c44db --- /dev/null +++ b/third_party/intel/clang/xsaveoptintrin.h @@ -0,0 +1,34 @@ +/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ----------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XSAVEOPTINTRIN_H +#define __XSAVEOPTINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaveopt"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_xsaveopt(void *__p, unsigned long long __m) { + __builtin_ia32_xsaveopt(__p, __m); +} + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS +_xsaveopt64(void *__p, unsigned long long __m) { + __builtin_ia32_xsaveopt64(__p, __m); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/xsavesintrin.h b/third_party/intel/clang/xsavesintrin.h new file mode 100644 index 000000000..3f99219a2 --- /dev/null +++ b/third_party/intel/clang/xsavesintrin.h @@ -0,0 +1,44 @@ +/*===---- xsavesintrin.h - XSAVES intrinsic --------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XSAVESINTRIN_H +#define __XSAVESINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaves"))) + +static __inline__ void __DEFAULT_FN_ATTRS +_xsaves(void *__p, unsigned long long __m) { + __builtin_ia32_xsaves(__p, __m); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xrstors(void *__p, unsigned long long __m) { + __builtin_ia32_xrstors(__p, __m); +} + +#ifdef __x86_64__ +static __inline__ void __DEFAULT_FN_ATTRS +_xrstors64(void *__p, unsigned long long __m) { + __builtin_ia32_xrstors64(__p, __m); +} + +static __inline__ void __DEFAULT_FN_ATTRS +_xsaves64(void *__p, unsigned long long __m) { + __builtin_ia32_xsaves64(__p, __m); +} +#endif + +#undef __DEFAULT_FN_ATTRS + +#endif diff --git a/third_party/intel/clang/xtestintrin.h b/third_party/intel/clang/xtestintrin.h new file mode 100644 index 000000000..7d19e3733 --- /dev/null +++ b/third_party/intel/clang/xtestintrin.h @@ -0,0 +1,27 @@ +/*===---- xtestintrin.h - XTEST intrinsic ----------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use directly; include instead." +#endif + +#ifndef __XTESTINTRIN_H +#define __XTESTINTRIN_H + +/* xtest returns non-zero if the instruction is executed within an RTM or active + * HLE region. */ +/* FIXME: This can be an either or for RTM/HLE. Deal with this when HLE is + * supported. */ +static __inline__ int + __attribute__((__always_inline__, __nodebug__, __target__("rtm"))) + _xtest(void) { + return __builtin_ia32_xtest(); +} + +#endif diff --git a/third_party/intel/mm_malloc.internal.h b/third_party/intel/mm_malloc.internal.h index 4a41f80c8..2443d5f0b 100644 --- a/third_party/intel/mm_malloc.internal.h +++ b/third_party/intel/mm_malloc.internal.h @@ -5,7 +5,7 @@ #ifndef __cplusplus extern int posix_memalign (void **, size_t, size_t); #else -extern "C" int posix_memalign (void **, size_t, size_t) throw (); +extern "C" int posix_memalign (void **, size_t, size_t); #endif static __inline void * _mm_malloc (size_t __size, size_t __alignment) diff --git a/third_party/libunwind/AddressSpace.hpp b/third_party/libunwind/AddressSpace.hpp index f7c627141..26ee10c52 100644 --- a/third_party/libunwind/AddressSpace.hpp +++ b/third_party/libunwind/AddressSpace.hpp @@ -94,12 +94,12 @@ namespace libunwind { // __eh_frame_hdr_start = SIZEOF(.eh_frame_hdr) > 0 ? ADDR(.eh_frame_hdr) : 0; // __eh_frame_hdr_end = SIZEOF(.eh_frame_hdr) > 0 ? . 
: 0; -extern char __eh_frame_start; -extern char __eh_frame_end; +extern char __eh_frame_start __attribute__((__weak__)); // [jart] +extern char __eh_frame_end __attribute__((__weak__)); // [jart] #if defined(_LIBUNWIND_SUPPORT_DWARF_INDEX) -extern char __eh_frame_hdr_start; -extern char __eh_frame_hdr_end; +extern char __eh_frame_hdr_start __attribute__((__weak__)); // [jart] +extern char __eh_frame_hdr_end __attribute__((__weak__)); // [jart] #endif #elif defined(_LIBUNWIND_ARM_EHABI) && defined(_LIBUNWIND_IS_BAREMETAL) diff --git a/third_party/libunwind/BUILD.mk b/third_party/libunwind/BUILD.mk index 242d4f8d1..560df58b4 100644 --- a/third_party/libunwind/BUILD.mk +++ b/third_party/libunwind/BUILD.mk @@ -20,6 +20,7 @@ THIRD_PARTY_LIBUNWIND_A_HDRS = \ third_party/libunwind/include/__libunwind_config.h \ third_party/libunwind/include/libunwind.h \ third_party/libunwind/include/unwind.h \ + third_party/libunwind/assembly.h \ third_party/libunwind/config.h \ third_party/libunwind/cet_unwind.h \ third_party/libunwind/dwarf2.h \ @@ -35,18 +36,23 @@ THIRD_PARTY_LIBUNWIND_A_SRCS_CC = \ third_party/libunwind/libunwind.cc THIRD_PARTY_LIBUNWIND_A_SRCS_C = \ - third_party/libunwind/Unwind-sjlj.c \ third_party/libunwind/UnwindLevel1-gcc-ext.c \ third_party/libunwind/UnwindLevel1.c \ third_party/libunwind/gcc_personality_v0.c +THIRD_PARTY_LIBUNWIND_A_SRCS_S = \ + third_party/libunwind/UnwindRegistersRestore.S \ + third_party/libunwind/UnwindRegistersSave.S \ + THIRD_PARTY_LIBUNWIND_A_SRCS = \ $(THIRD_PARTY_LIBUNWIND_A_SRCS_C) \ - $(THIRD_PARTY_LIBUNWIND_A_SRCS_CC) + $(THIRD_PARTY_LIBUNWIND_A_SRCS_CC) \ + $(THIRD_PARTY_LIBUNWIND_A_SRCS_S) \ THIRD_PARTY_LIBUNWIND_A_OBJS = \ $(THIRD_PARTY_LIBUNWIND_A_SRCS_C:%.c=o/$(MODE)/%.o) \ - $(THIRD_PARTY_LIBUNWIND_A_SRCS_CC:%.cc=o/$(MODE)/%.o) + $(THIRD_PARTY_LIBUNWIND_A_SRCS_CC:%.cc=o/$(MODE)/%.o) \ + $(THIRD_PARTY_LIBUNWIND_A_SRCS_S:%.S=o/$(MODE)/%.o) \ THIRD_PARTY_LIBUNWIND_A_CHECKS = \ $(THIRD_PARTY_LIBUNWIND_A).pkg \ @@ -55,7 +61,9 @@ THIRD_PARTY_LIBUNWIND_A_CHECKS = \ THIRD_PARTY_LIBUNWIND_A_DIRECTDEPS = \ LIBC_CALLS \ LIBC_INTRIN \ - LIBC_STDIO + LIBC_STDIO \ + LIBC_MEM \ + LIBC_THREAD \ THIRD_PARTY_LIBUNWIND_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_LIBUNWIND_A_DIRECTDEPS),$($(x)))) @@ -75,7 +83,20 @@ $(THIRD_PARTY_LIBUNWIND_A_OBJS): private \ -fno-sanitize=all \ -ffunction-sections \ -fdata-sections \ - -D_LIBUNWIND_USE_DLADDR=0 + -D_LIBUNWIND_USE_DLADDR=0 \ + -D_LIBUNWIND_IS_BAREMETAL=1 \ + +# avoid cyclic dependency on libcxxabi +o/$(MODE)/third_party/libunwind/libunwind.o: \ + COPTS += \ + -fno-rtti \ + +o/$(MODE)/third_party/libunwind/UnwindRegistersRestore.o: third_party/libunwind/UnwindRegistersRestore.S + @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $< +o/$(MODE)/third_party/libunwind/UnwindRegistersSave.o: third_party/libunwind/UnwindRegistersSave.S + @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $< + +$(THIRD_PARTY_LIBUNWIND_A_OBJS): third_party/libunwind/BUILD.mk THIRD_PARTY_LIBUNWIND_LIBS = $(foreach x,$(THIRD_PARTY_LIBUNWIND_ARTIFACTS),$($(x))) THIRD_PARTY_LIBUNWIND_SRCS = $(foreach x,$(THIRD_PARTY_LIBUNWIND_ARTIFACTS),$($(x)_SRCS)) diff --git a/third_party/libunwind/Unwind-sjlj.c b/third_party/libunwind/Unwind-sjlj.c deleted file mode 100644 index 514358e5b..000000000 --- a/third_party/libunwind/Unwind-sjlj.c +++ /dev/null @@ -1,530 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM 
Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -// -// Implements setjump-longjump based C++ exceptions -// -//===----------------------------------------------------------------------===// - -#include "third_party/libunwind/include/unwind.h" - -#include "libc/isystem/inttypes.h" -#include "libc/isystem/stdint.h" -#include "libc/isystem/stdbool.h" -#include "libc/isystem/stdlib.h" - -#include "third_party/libunwind/config.h" - -/// With SJLJ based exceptions, any function that has a catch clause or needs to -/// do any clean up when an exception propagates through it, needs to call -/// \c _Unwind_SjLj_Register at the start of the function and -/// \c _Unwind_SjLj_Unregister at the end. The register function is called with -/// the address of a block of memory in the function's stack frame. The runtime -/// keeps a linked list (stack) of these blocks - one per thread. The calling -/// function also sets the personality and lsda fields of the block. - -#if defined(_LIBUNWIND_BUILD_SJLJ_APIS) - -typedef uintptr_t _Unwind_Word __attribute__((__mode__(__unwind_word__))); - -struct _Unwind_FunctionContext { - // next function in stack of handlers - struct _Unwind_FunctionContext *prev; - -#if defined(__ve__) - // VE requires to store 64 bit pointers in the buffer for SjLj exception. - // We expand the size of values defined here. This size must be matched - // to the size returned by TargetMachine::getSjLjDataSize(). - - // set by calling function before registering to be the landing pad - uint64_t resumeLocation; - - // set by personality handler to be parameters passed to landing pad function - uint64_t resumeParameters[4]; -#else - // set by calling function before registering to be the landing pad - uint32_t resumeLocation; - - // set by personality handler to be parameters passed to landing pad function - _Unwind_Word resumeParameters[4]; -#endif - - // set by calling function before registering - _Unwind_Personality_Fn personality; // arm offset=24 - uintptr_t lsda; // arm offset=28 - - // variable length array, contains registers to restore - // 0 = r7, 1 = pc, 2 = sp - void *jbuf[]; -}; - -#if defined(_LIBUNWIND_HAS_NO_THREADS) -# define _LIBUNWIND_THREAD_LOCAL -#else -# if __STDC_VERSION__ >= 201112L -# define _LIBUNWIND_THREAD_LOCAL _Thread_local -# elif defined(_MSC_VER) -# define _LIBUNWIND_THREAD_LOCAL __declspec(thread) -# elif defined(__GNUC__) || defined(__clang__) -# define _LIBUNWIND_THREAD_LOCAL __thread -# else -# error Unable to create thread local storage -# endif -#endif - - -#if !defined(FOR_DYLD) - -#if defined(__APPLE__) -#include -#else -static _LIBUNWIND_THREAD_LOCAL struct _Unwind_FunctionContext *stack = NULL; -#endif - -static struct _Unwind_FunctionContext *__Unwind_SjLj_GetTopOfFunctionStack() { -#if defined(__APPLE__) - return _pthread_getspecific_direct(__PTK_LIBC_DYLD_Unwind_SjLj_Key); -#else - return stack; -#endif -} - -static void -__Unwind_SjLj_SetTopOfFunctionStack(struct _Unwind_FunctionContext *fc) { -#if defined(__APPLE__) - _pthread_setspecific_direct(__PTK_LIBC_DYLD_Unwind_SjLj_Key, fc); -#else - stack = fc; -#endif -} - -#endif - - -/// Called at start of each function that catches exceptions -_LIBUNWIND_EXPORT void -_Unwind_SjLj_Register(struct _Unwind_FunctionContext *fc) { - fc->prev = __Unwind_SjLj_GetTopOfFunctionStack(); - __Unwind_SjLj_SetTopOfFunctionStack(fc); -} - - -/// Called at end of each function that catches exceptions -_LIBUNWIND_EXPORT void 
-_Unwind_SjLj_Unregister(struct _Unwind_FunctionContext *fc) { - __Unwind_SjLj_SetTopOfFunctionStack(fc->prev); -} - - -static _Unwind_Reason_Code -unwind_phase1(struct _Unwind_Exception *exception_object) { - _Unwind_FunctionContext_t c = __Unwind_SjLj_GetTopOfFunctionStack(); - _LIBUNWIND_TRACE_UNWINDING("unwind_phase1: initial function-context=%p", - (void *)c); - - // walk each frame looking for a place to stop - for (bool handlerNotFound = true; handlerNotFound; c = c->prev) { - - // check for no more frames - if (c == NULL) { - _LIBUNWIND_TRACE_UNWINDING("unwind_phase1(ex_ojb=%p): reached " - "bottom => _URC_END_OF_STACK", - (void *)exception_object); - return _URC_END_OF_STACK; - } - - _LIBUNWIND_TRACE_UNWINDING("unwind_phase1: function-context=%p", (void *)c); - // if there is a personality routine, ask it if it will want to stop at this - // frame - if (c->personality != NULL) { - _LIBUNWIND_TRACE_UNWINDING("unwind_phase1(ex_ojb=%p): calling " - "personality function %p", - (void *)exception_object, - (void *)c->personality); - _Unwind_Reason_Code personalityResult = (*c->personality)( - 1, _UA_SEARCH_PHASE, exception_object->exception_class, - exception_object, (struct _Unwind_Context *)c); - switch (personalityResult) { - case _URC_HANDLER_FOUND: - // found a catch clause or locals that need destructing in this frame - // stop search and remember function context - handlerNotFound = false; - exception_object->private_2 = (uintptr_t) c; - _LIBUNWIND_TRACE_UNWINDING("unwind_phase1(ex_ojb=%p): " - "_URC_HANDLER_FOUND", - (void *)exception_object); - return _URC_NO_REASON; - - case _URC_CONTINUE_UNWIND: - _LIBUNWIND_TRACE_UNWINDING("unwind_phase1(ex_ojb=%p): " - "_URC_CONTINUE_UNWIND", - (void *)exception_object); - // continue unwinding - break; - - default: - // something went wrong - _LIBUNWIND_TRACE_UNWINDING( - "unwind_phase1(ex_ojb=%p): _URC_FATAL_PHASE1_ERROR", - (void *)exception_object); - return _URC_FATAL_PHASE1_ERROR; - } - } - } - return _URC_NO_REASON; -} - - -static _Unwind_Reason_Code -unwind_phase2(struct _Unwind_Exception *exception_object) { - _LIBUNWIND_TRACE_UNWINDING("unwind_phase2(ex_ojb=%p)", - (void *)exception_object); - - // walk each frame until we reach where search phase said to stop - _Unwind_FunctionContext_t c = __Unwind_SjLj_GetTopOfFunctionStack(); - while (true) { - _LIBUNWIND_TRACE_UNWINDING("unwind_phase2s(ex_ojb=%p): context=%p", - (void *)exception_object, (void *)c); - - // check for no more frames - if (c == NULL) { - _LIBUNWIND_TRACE_UNWINDING( - "unwind_phase2(ex_ojb=%p): __unw_step() reached " - "bottom => _URC_END_OF_STACK", - (void *)exception_object); - return _URC_END_OF_STACK; - } - - // if there is a personality routine, tell it we are unwinding - if (c->personality != NULL) { - _Unwind_Action action = _UA_CLEANUP_PHASE; - if ((uintptr_t) c == exception_object->private_2) - action = (_Unwind_Action)( - _UA_CLEANUP_PHASE | - _UA_HANDLER_FRAME); // tell personality this was the frame it marked - // in phase 1 - _Unwind_Reason_Code personalityResult = - (*c->personality)(1, action, exception_object->exception_class, - exception_object, (struct _Unwind_Context *)c); - switch (personalityResult) { - case _URC_CONTINUE_UNWIND: - // continue unwinding - _LIBUNWIND_TRACE_UNWINDING( - "unwind_phase2(ex_ojb=%p): _URC_CONTINUE_UNWIND", - (void *)exception_object); - if ((uintptr_t) c == exception_object->private_2) { - // phase 1 said we would stop at this frame, but we did not... 
- _LIBUNWIND_ABORT("during phase1 personality function said it would " - "stop here, but now if phase2 it did not stop here"); - } - break; - case _URC_INSTALL_CONTEXT: - _LIBUNWIND_TRACE_UNWINDING("unwind_phase2(ex_ojb=%p): " - "_URC_INSTALL_CONTEXT, will resume at " - "landing pad %p", - (void *)exception_object, c->jbuf[1]); - // personality routine says to transfer control to landing pad - // we may get control back if landing pad calls _Unwind_Resume() - __Unwind_SjLj_SetTopOfFunctionStack(c); - __builtin_longjmp(c->jbuf, 1); - // __unw_resume() only returns if there was an error - return _URC_FATAL_PHASE2_ERROR; - default: - // something went wrong - _LIBUNWIND_DEBUG_LOG("personality function returned unknown result %d", - personalityResult); - return _URC_FATAL_PHASE2_ERROR; - } - } - c = c->prev; - } - - // clean up phase did not resume at the frame that the search phase said it - // would - return _URC_FATAL_PHASE2_ERROR; -} - - -static _Unwind_Reason_Code -unwind_phase2_forced(struct _Unwind_Exception *exception_object, - _Unwind_Stop_Fn stop, void *stop_parameter) { - // walk each frame until we reach where search phase said to stop - _Unwind_FunctionContext_t c = __Unwind_SjLj_GetTopOfFunctionStack(); - while (true) { - - // get next frame (skip over first which is _Unwind_RaiseException) - if (c == NULL) { - _LIBUNWIND_TRACE_UNWINDING( - "unwind_phase2(ex_ojb=%p): __unw_step() reached " - "bottom => _URC_END_OF_STACK", - (void *)exception_object); - return _URC_END_OF_STACK; - } - - // call stop function at each frame - _Unwind_Action action = - (_Unwind_Action)(_UA_FORCE_UNWIND | _UA_CLEANUP_PHASE); - _Unwind_Reason_Code stopResult = - (*stop)(1, action, exception_object->exception_class, exception_object, - (struct _Unwind_Context *)c, stop_parameter); - _LIBUNWIND_TRACE_UNWINDING("unwind_phase2_forced(ex_ojb=%p): " - "stop function returned %d", - (void *)exception_object, stopResult); - if (stopResult != _URC_NO_REASON) { - _LIBUNWIND_TRACE_UNWINDING("unwind_phase2_forced(ex_ojb=%p): " - "stopped by stop function", - (void *)exception_object); - return _URC_FATAL_PHASE2_ERROR; - } - - // if there is a personality routine, tell it we are unwinding - if (c->personality != NULL) { - _Unwind_Personality_Fn p = (_Unwind_Personality_Fn)c->personality; - _LIBUNWIND_TRACE_UNWINDING("unwind_phase2_forced(ex_ojb=%p): " - "calling personality function %p", - (void *)exception_object, (void *)p); - _Unwind_Reason_Code personalityResult = - (*p)(1, action, exception_object->exception_class, exception_object, - (struct _Unwind_Context *)c); - switch (personalityResult) { - case _URC_CONTINUE_UNWIND: - _LIBUNWIND_TRACE_UNWINDING("unwind_phase2_forced(ex_ojb=%p): " - "personality returned _URC_CONTINUE_UNWIND", - (void *)exception_object); - // destructors called, continue unwinding - break; - case _URC_INSTALL_CONTEXT: - _LIBUNWIND_TRACE_UNWINDING("unwind_phase2_forced(ex_ojb=%p): " - "personality returned _URC_INSTALL_CONTEXT", - (void *)exception_object); - // we may get control back if landing pad calls _Unwind_Resume() - __Unwind_SjLj_SetTopOfFunctionStack(c); - __builtin_longjmp(c->jbuf, 1); - break; - default: - // something went wrong - _LIBUNWIND_TRACE_UNWINDING("unwind_phase2_forced(ex_ojb=%p): " - "personality returned %d, " - "_URC_FATAL_PHASE2_ERROR", - (void *)exception_object, personalityResult); - return _URC_FATAL_PHASE2_ERROR; - } - } - c = c->prev; - } - - // call stop function one last time and tell it we've reached the end of the - // stack - 
_LIBUNWIND_TRACE_UNWINDING("unwind_phase2_forced(ex_ojb=%p): calling stop " - "function with _UA_END_OF_STACK", - (void *)exception_object); - _Unwind_Action lastAction = - (_Unwind_Action)(_UA_FORCE_UNWIND | _UA_CLEANUP_PHASE | _UA_END_OF_STACK); - (*stop)(1, lastAction, exception_object->exception_class, exception_object, - (struct _Unwind_Context *)c, stop_parameter); - - // clean up phase did not resume at the frame that the search phase said it - // would - return _URC_FATAL_PHASE2_ERROR; -} - - -/// Called by __cxa_throw. Only returns if there is a fatal error -_LIBUNWIND_EXPORT _Unwind_Reason_Code -_Unwind_SjLj_RaiseException(struct _Unwind_Exception *exception_object) { - _LIBUNWIND_TRACE_API("_Unwind_SjLj_RaiseException(ex_obj=%p)", - (void *)exception_object); - - // mark that this is a non-forced unwind, so _Unwind_Resume() can do the right - // thing - exception_object->private_1 = 0; - exception_object->private_2 = 0; - - // phase 1: the search phase - _Unwind_Reason_Code phase1 = unwind_phase1(exception_object); - if (phase1 != _URC_NO_REASON) - return phase1; - - // phase 2: the clean up phase - return unwind_phase2(exception_object); -} - - - -/// When _Unwind_RaiseException() is in phase2, it hands control -/// to the personality function at each frame. The personality -/// may force a jump to a landing pad in that function, the landing -/// pad code may then call _Unwind_Resume() to continue with the -/// unwinding. Note: the call to _Unwind_Resume() is from compiler -/// generated user code. All other _Unwind_* routines are called -/// by the C++ runtime __cxa_* routines. -/// -/// Re-throwing an exception is implemented by having the code call -/// __cxa_rethrow() which in turn calls _Unwind_Resume_or_Rethrow() -_LIBUNWIND_EXPORT void -_Unwind_SjLj_Resume(struct _Unwind_Exception *exception_object) { - _LIBUNWIND_TRACE_API("_Unwind_SjLj_Resume(ex_obj=%p)", - (void *)exception_object); - - if (exception_object->private_1 != 0) - unwind_phase2_forced(exception_object, - (_Unwind_Stop_Fn) exception_object->private_1, - (void *)exception_object->private_2); - else - unwind_phase2(exception_object); - - // clients assume _Unwind_Resume() does not return, so all we can do is abort. - _LIBUNWIND_ABORT("_Unwind_SjLj_Resume() can't return"); -} - - -/// Called by __cxa_rethrow(). -_LIBUNWIND_EXPORT _Unwind_Reason_Code -_Unwind_SjLj_Resume_or_Rethrow(struct _Unwind_Exception *exception_object) { - _LIBUNWIND_TRACE_API("__Unwind_SjLj_Resume_or_Rethrow(ex_obj=%p), " - "private_1=%" PRIuPTR, - (void *)exception_object, exception_object->private_1); - // If this is non-forced and a stopping place was found, then this is a - // re-throw. - // Call _Unwind_RaiseException() as if this was a new exception. - if (exception_object->private_1 == 0) { - return _Unwind_SjLj_RaiseException(exception_object); - // should return if there is no catch clause, so that __cxa_rethrow can call - // std::terminate() - } - - // Call through to _Unwind_Resume() which distinguishes between forced and - // regular exceptions. - _Unwind_SjLj_Resume(exception_object); - _LIBUNWIND_ABORT("__Unwind_SjLj_Resume_or_Rethrow() called " - "_Unwind_SjLj_Resume() which unexpectedly returned"); -} - - -/// Called by personality handler during phase 2 to get LSDA for current frame. 
-_LIBUNWIND_EXPORT uintptr_t -_Unwind_GetLanguageSpecificData(struct _Unwind_Context *context) { - _Unwind_FunctionContext_t ufc = (_Unwind_FunctionContext_t) context; - _LIBUNWIND_TRACE_API("_Unwind_GetLanguageSpecificData(context=%p) " - "=> 0x%" PRIuPTR, - (void *)context, ufc->lsda); - return ufc->lsda; -} - - -/// Called by personality handler during phase 2 to get register values. -_LIBUNWIND_EXPORT uintptr_t _Unwind_GetGR(struct _Unwind_Context *context, - int index) { - _LIBUNWIND_TRACE_API("_Unwind_GetGR(context=%p, reg=%d)", (void *)context, - index); - _Unwind_FunctionContext_t ufc = (_Unwind_FunctionContext_t) context; - return ufc->resumeParameters[index]; -} - - -/// Called by personality handler during phase 2 to alter register values. -_LIBUNWIND_EXPORT void _Unwind_SetGR(struct _Unwind_Context *context, int index, - uintptr_t new_value) { - _LIBUNWIND_TRACE_API("_Unwind_SetGR(context=%p, reg=%d, value=0x%" PRIuPTR - ")", - (void *)context, index, new_value); - _Unwind_FunctionContext_t ufc = (_Unwind_FunctionContext_t) context; - ufc->resumeParameters[index] = new_value; -} - - -/// Called by personality handler during phase 2 to get instruction pointer. -_LIBUNWIND_EXPORT uintptr_t _Unwind_GetIP(struct _Unwind_Context *context) { - _Unwind_FunctionContext_t ufc = (_Unwind_FunctionContext_t) context; - _LIBUNWIND_TRACE_API("_Unwind_GetIP(context=%p) => 0x%" PRIu32, - (void *)context, ufc->resumeLocation + 1); - return ufc->resumeLocation + 1; -} - - -/// Called by personality handler during phase 2 to get instruction pointer. -/// ipBefore is a boolean that says if IP is already adjusted to be the call -/// site address. Normally IP is the return address. -_LIBUNWIND_EXPORT uintptr_t _Unwind_GetIPInfo(struct _Unwind_Context *context, - int *ipBefore) { - _Unwind_FunctionContext_t ufc = (_Unwind_FunctionContext_t) context; - *ipBefore = 0; - _LIBUNWIND_TRACE_API("_Unwind_GetIPInfo(context=%p, %p) => 0x%" PRIu32, - (void *)context, (void *)ipBefore, - ufc->resumeLocation + 1); - return ufc->resumeLocation + 1; -} - - -/// Called by personality handler during phase 2 to alter instruction pointer. -_LIBUNWIND_EXPORT void _Unwind_SetIP(struct _Unwind_Context *context, - uintptr_t new_value) { - _LIBUNWIND_TRACE_API("_Unwind_SetIP(context=%p, value=0x%" PRIuPTR ")", - (void *)context, new_value); - _Unwind_FunctionContext_t ufc = (_Unwind_FunctionContext_t) context; - ufc->resumeLocation = new_value - 1; -} - - -/// Called by personality handler during phase 2 to find the start of the -/// function. -_LIBUNWIND_EXPORT uintptr_t -_Unwind_GetRegionStart(struct _Unwind_Context *context) { - // Not supported or needed for sjlj based unwinding - (void)context; - _LIBUNWIND_TRACE_API("_Unwind_GetRegionStart(context=%p)", (void *)context); - return 0; -} - - -/// Called by personality handler during phase 2 if a foreign exception -/// is caught. -_LIBUNWIND_EXPORT void -_Unwind_DeleteException(struct _Unwind_Exception *exception_object) { - _LIBUNWIND_TRACE_API("_Unwind_DeleteException(ex_obj=%p)", - (void *)exception_object); - if (exception_object->exception_cleanup != NULL) - (*exception_object->exception_cleanup)(_URC_FOREIGN_EXCEPTION_CAUGHT, - exception_object); -} - - - -/// Called by personality handler during phase 2 to get base address for data -/// relative encodings. 
-_LIBUNWIND_EXPORT uintptr_t -_Unwind_GetDataRelBase(struct _Unwind_Context *context) { - // Not supported or needed for sjlj based unwinding - (void)context; - _LIBUNWIND_TRACE_API("_Unwind_GetDataRelBase(context=%p)", (void *)context); - _LIBUNWIND_ABORT("_Unwind_GetDataRelBase() not implemented"); -} - - -/// Called by personality handler during phase 2 to get base address for text -/// relative encodings. -_LIBUNWIND_EXPORT uintptr_t -_Unwind_GetTextRelBase(struct _Unwind_Context *context) { - // Not supported or needed for sjlj based unwinding - (void)context; - _LIBUNWIND_TRACE_API("_Unwind_GetTextRelBase(context=%p)", (void *)context); - _LIBUNWIND_ABORT("_Unwind_GetTextRelBase() not implemented"); -} - - -/// Called by personality handler to get "Call Frame Area" for current frame. -_LIBUNWIND_EXPORT uintptr_t _Unwind_GetCFA(struct _Unwind_Context *context) { - _LIBUNWIND_TRACE_API("_Unwind_GetCFA(context=%p)", (void *)context); - if (context != NULL) { - _Unwind_FunctionContext_t ufc = (_Unwind_FunctionContext_t) context; - // Setjmp/longjmp based exceptions don't have a true CFA. - // Instead, the SP in the jmpbuf is the closest approximation. - return (uintptr_t) ufc->jbuf[2]; - } - return 0; -} - -#endif // defined(_LIBUNWIND_BUILD_SJLJ_APIS) diff --git a/third_party/libunwind/UnwindRegistersRestore.S b/third_party/libunwind/UnwindRegistersRestore.S new file mode 100644 index 000000000..180a66582 --- /dev/null +++ b/third_party/libunwind/UnwindRegistersRestore.S @@ -0,0 +1,1256 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "assembly.h" + +#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + +#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63 + +#if defined(_AIX) + .toc +#else + .text +#endif + +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) + +#if defined(__i386__) +DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto) +# +# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *); +# +# On entry: +# + + +# +-----------------------+ +# + thread_state pointer + +# +-----------------------+ +# + return address + +# +-----------------------+ <-- SP +# + + + + _LIBUNWIND_CET_ENDBR + movl 4(%esp), %eax + # set up eax and ret on new stack location + movl 28(%eax), %edx # edx holds new stack pointer + subl $8,%edx + movl %edx, 28(%eax) + movl 0(%eax), %ebx + movl %ebx, 0(%edx) + movl 40(%eax), %ebx + movl %ebx, 4(%edx) + # we now have ret and eax pushed onto where new stack will be + # restore all registers + movl 4(%eax), %ebx + movl 8(%eax), %ecx + movl 12(%eax), %edx + movl 16(%eax), %edi + movl 20(%eax), %esi + movl 24(%eax), %ebp + movl 28(%eax), %esp + # skip ss + # skip eflags + pop %eax # eax was already pushed on new stack + pop %ecx + jmp *%ecx + # skip cs + # skip ds + # skip es + # skip fs + # skip gs + +#elif defined(__x86_64__) + +DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto) +# +# extern "C" void 
__libunwind_Registers_x86_64_jumpto(Registers_x86_64 *); +# +#if defined(_WIN64) +# On entry, thread_state pointer is in rcx; move it into rdi +# to share restore code below. Since this routine restores and +# overwrites all registers, we can use the same registers for +# pointers and temporaries as on unix even though win64 normally +# mustn't clobber some of them. + movq %rcx, %rdi +#else +# On entry, thread_state pointer is in rdi +#endif + + _LIBUNWIND_CET_ENDBR + movq 56(%rdi), %rax # rax holds new stack pointer + subq $16, %rax + movq %rax, 56(%rdi) + movq 32(%rdi), %rbx # store new rdi on new stack + movq %rbx, 0(%rax) + movq 128(%rdi), %rbx # store new rip on new stack + movq %rbx, 8(%rax) + # restore all registers + movq 0(%rdi), %rax + movq 8(%rdi), %rbx + movq 16(%rdi), %rcx + movq 24(%rdi), %rdx + # restore rdi later + movq 40(%rdi), %rsi + movq 48(%rdi), %rbp + # restore rsp later + movq 64(%rdi), %r8 + movq 72(%rdi), %r9 + movq 80(%rdi), %r10 + movq 88(%rdi), %r11 + movq 96(%rdi), %r12 + movq 104(%rdi), %r13 + movq 112(%rdi), %r14 + movq 120(%rdi), %r15 + # skip rflags + # skip cs + # skip fs + # skip gs + +#if defined(_WIN64) + movdqu 176(%rdi),%xmm0 + movdqu 192(%rdi),%xmm1 + movdqu 208(%rdi),%xmm2 + movdqu 224(%rdi),%xmm3 + movdqu 240(%rdi),%xmm4 + movdqu 256(%rdi),%xmm5 + movdqu 272(%rdi),%xmm6 + movdqu 288(%rdi),%xmm7 + movdqu 304(%rdi),%xmm8 + movdqu 320(%rdi),%xmm9 + movdqu 336(%rdi),%xmm10 + movdqu 352(%rdi),%xmm11 + movdqu 368(%rdi),%xmm12 + movdqu 384(%rdi),%xmm13 + movdqu 400(%rdi),%xmm14 + movdqu 416(%rdi),%xmm15 +#endif + movq 56(%rdi), %rsp # cut back rsp to new location + pop %rdi # rdi was saved here earlier + pop %rcx + jmpq *%rcx + + +#elif defined(__powerpc64__) + +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv) +// +// void libunwind::Registers_ppc64::jumpto() +// +// On entry: +// thread_state pointer is in r3 +// + +// load register (GPR) +#define PPC64_LR(n) \ + ld n, (8 * (n + 2))(3) + + // restore integral registers + // skip r0 for now + // skip r1 for now + PPC64_LR(2) + // skip r3 for now + // skip r4 for now + // skip r5 for now + PPC64_LR(6) + PPC64_LR(7) + PPC64_LR(8) + PPC64_LR(9) + PPC64_LR(10) + PPC64_LR(11) + PPC64_LR(12) + PPC64_LR(13) + PPC64_LR(14) + PPC64_LR(15) + PPC64_LR(16) + PPC64_LR(17) + PPC64_LR(18) + PPC64_LR(19) + PPC64_LR(20) + PPC64_LR(21) + PPC64_LR(22) + PPC64_LR(23) + PPC64_LR(24) + PPC64_LR(25) + PPC64_LR(26) + PPC64_LR(27) + PPC64_LR(28) + PPC64_LR(29) + PPC64_LR(30) + PPC64_LR(31) + +#if defined(__VSX__) + + // restore VS registers + // (note that this also restores floating point registers and V registers, + // because part of VS is mapped to these registers) + + addi 4, 3, PPC64_OFFS_FP + +// load VS register +#ifdef __LITTLE_ENDIAN__ +// For little-endian targets, we need a swap since lxvd2x will load the register +// in the incorrect doubleword order. +// FIXME: when supporting targets older than Power9 on LE is no longer required, +// this can be changed to simply `lxv n, (16 * n)(4)`. 
+#define PPC64_LVS(n) \ + lxvd2x n, 0, 4 ;\ + xxswapd n, n ;\ + addi 4, 4, 16 +#else +#define PPC64_LVS(n) \ + lxvd2x n, 0, 4 ;\ + addi 4, 4, 16 +#endif + + // restore the first 32 VS regs (and also all floating point regs) + PPC64_LVS(0) + PPC64_LVS(1) + PPC64_LVS(2) + PPC64_LVS(3) + PPC64_LVS(4) + PPC64_LVS(5) + PPC64_LVS(6) + PPC64_LVS(7) + PPC64_LVS(8) + PPC64_LVS(9) + PPC64_LVS(10) + PPC64_LVS(11) + PPC64_LVS(12) + PPC64_LVS(13) + PPC64_LVS(14) + PPC64_LVS(15) + PPC64_LVS(16) + PPC64_LVS(17) + PPC64_LVS(18) + PPC64_LVS(19) + PPC64_LVS(20) + PPC64_LVS(21) + PPC64_LVS(22) + PPC64_LVS(23) + PPC64_LVS(24) + PPC64_LVS(25) + PPC64_LVS(26) + PPC64_LVS(27) + PPC64_LVS(28) + PPC64_LVS(29) + PPC64_LVS(30) + PPC64_LVS(31) + +#ifdef __LITTLE_ENDIAN__ +#define PPC64_CLVS_RESTORE(n) \ + addi 4, 3, PPC64_OFFS_FP + n * 16 ;\ + lxvd2x n, 0, 4 ;\ + xxswapd n, n +#else +#define PPC64_CLVS_RESTORE(n) \ + addi 4, 3, PPC64_OFFS_FP + n * 16 ;\ + lxvd2x n, 0, 4 +#endif + +#if !defined(_AIX) + // use VRSAVE to conditionally restore the remaining VS regs, that are + // where the V regs are mapped. In the AIX ABI, VRSAVE is not used. + ld 5, PPC64_OFFS_VRSAVE(3) // test VRsave + cmpwi 5, 0 + beq Lnovec + +// conditionally load VS +#define PPC64_CLVSl(n) \ + andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n)) ;\ + beq Ldone##n ;\ + PPC64_CLVS_RESTORE(n) ;\ +Ldone##n: + +#define PPC64_CLVSh(n) \ + andi. 0, 5, (1 PPC_LEFT_SHIFT(63-n)) ;\ + beq Ldone##n ;\ + PPC64_CLVS_RESTORE(n) ;\ +Ldone##n: + +#else + +#define PPC64_CLVSl(n) PPC64_CLVS_RESTORE(n) +#define PPC64_CLVSh(n) PPC64_CLVS_RESTORE(n) + +#endif // !defined(_AIX) + + PPC64_CLVSl(32) + PPC64_CLVSl(33) + PPC64_CLVSl(34) + PPC64_CLVSl(35) + PPC64_CLVSl(36) + PPC64_CLVSl(37) + PPC64_CLVSl(38) + PPC64_CLVSl(39) + PPC64_CLVSl(40) + PPC64_CLVSl(41) + PPC64_CLVSl(42) + PPC64_CLVSl(43) + PPC64_CLVSl(44) + PPC64_CLVSl(45) + PPC64_CLVSl(46) + PPC64_CLVSl(47) + PPC64_CLVSh(48) + PPC64_CLVSh(49) + PPC64_CLVSh(50) + PPC64_CLVSh(51) + PPC64_CLVSh(52) + PPC64_CLVSh(53) + PPC64_CLVSh(54) + PPC64_CLVSh(55) + PPC64_CLVSh(56) + PPC64_CLVSh(57) + PPC64_CLVSh(58) + PPC64_CLVSh(59) + PPC64_CLVSh(60) + PPC64_CLVSh(61) + PPC64_CLVSh(62) + PPC64_CLVSh(63) + +#else + +// load FP register +#define PPC64_LF(n) \ + lfd n, (PPC64_OFFS_FP + n * 16)(3) + + // restore float registers + PPC64_LF(0) + PPC64_LF(1) + PPC64_LF(2) + PPC64_LF(3) + PPC64_LF(4) + PPC64_LF(5) + PPC64_LF(6) + PPC64_LF(7) + PPC64_LF(8) + PPC64_LF(9) + PPC64_LF(10) + PPC64_LF(11) + PPC64_LF(12) + PPC64_LF(13) + PPC64_LF(14) + PPC64_LF(15) + PPC64_LF(16) + PPC64_LF(17) + PPC64_LF(18) + PPC64_LF(19) + PPC64_LF(20) + PPC64_LF(21) + PPC64_LF(22) + PPC64_LF(23) + PPC64_LF(24) + PPC64_LF(25) + PPC64_LF(26) + PPC64_LF(27) + PPC64_LF(28) + PPC64_LF(29) + PPC64_LF(30) + PPC64_LF(31) + +#if defined(__ALTIVEC__) + +#define PPC64_CLV_UNALIGNED_RESTORE(n) \ + ld 0, (PPC64_OFFS_V + n * 16)(3) ;\ + std 0, 0(4) ;\ + ld 0, (PPC64_OFFS_V + n * 16 + 8)(3) ;\ + std 0, 8(4) ;\ + lvx n, 0, 4 + +#if !defined(_AIX) + // restore vector registers if any are in use. In the AIX ABI, VRSAVE is + // not used. + ld 5, PPC64_OFFS_VRSAVE(3) // test VRsave + cmpwi 5, 0 + beq Lnovec + +#define PPC64_CLV_UNALIGNEDl(n) \ + andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n)) ;\ + beq Ldone##n ;\ + PPC64_CLV_UNALIGNED_RESTORE(n) ;\ +Ldone ## n: + +#define PPC64_CLV_UNALIGNEDh(n) \ + andi. 
0, 5, (1 PPC_LEFT_SHIFT(31-n)) ;\ + beq Ldone##n ;\ + PPC64_CLV_UNALIGNED_RESTORE(n) ;\ +Ldone ## n: + +#else + +#define PPC64_CLV_UNALIGNEDl(n) PPC64_CLV_UNALIGNED_RESTORE(n) +#define PPC64_CLV_UNALIGNEDh(n) PPC64_CLV_UNALIGNED_RESTORE(n) + +#endif // !defined(_AIX) + + subi 4, 1, 16 + // r4 is now a 16-byte aligned pointer into the red zone + // the _vectorScalarRegisters may not be 16-byte aligned + // so copy via red zone temp buffer + + PPC64_CLV_UNALIGNEDl(0) + PPC64_CLV_UNALIGNEDl(1) + PPC64_CLV_UNALIGNEDl(2) + PPC64_CLV_UNALIGNEDl(3) + PPC64_CLV_UNALIGNEDl(4) + PPC64_CLV_UNALIGNEDl(5) + PPC64_CLV_UNALIGNEDl(6) + PPC64_CLV_UNALIGNEDl(7) + PPC64_CLV_UNALIGNEDl(8) + PPC64_CLV_UNALIGNEDl(9) + PPC64_CLV_UNALIGNEDl(10) + PPC64_CLV_UNALIGNEDl(11) + PPC64_CLV_UNALIGNEDl(12) + PPC64_CLV_UNALIGNEDl(13) + PPC64_CLV_UNALIGNEDl(14) + PPC64_CLV_UNALIGNEDl(15) + PPC64_CLV_UNALIGNEDh(16) + PPC64_CLV_UNALIGNEDh(17) + PPC64_CLV_UNALIGNEDh(18) + PPC64_CLV_UNALIGNEDh(19) + PPC64_CLV_UNALIGNEDh(20) + PPC64_CLV_UNALIGNEDh(21) + PPC64_CLV_UNALIGNEDh(22) + PPC64_CLV_UNALIGNEDh(23) + PPC64_CLV_UNALIGNEDh(24) + PPC64_CLV_UNALIGNEDh(25) + PPC64_CLV_UNALIGNEDh(26) + PPC64_CLV_UNALIGNEDh(27) + PPC64_CLV_UNALIGNEDh(28) + PPC64_CLV_UNALIGNEDh(29) + PPC64_CLV_UNALIGNEDh(30) + PPC64_CLV_UNALIGNEDh(31) + +#endif +#endif + +Lnovec: + ld 0, PPC64_OFFS_CR(3) + mtcr 0 + ld 0, PPC64_OFFS_SRR0(3) + mtctr 0 + +#if defined(_AIX) + // After setting GPR1 to a higher address, AIX wipes out the original + // stack space below that address invalidated by the new GPR1 value. Use + // GPR0 to save the value of GPR3 in the context before it is wiped out. + // This compromises the content of GPR0 which is a volatile register. + ld 0, (8 * (3 + 2))(3) +#else + PPC64_LR(0) +#endif + PPC64_LR(5) + PPC64_LR(4) + PPC64_LR(1) +#if defined(_AIX) + mr 3, 0 +#else + PPC64_LR(3) +#endif + bctr + +#elif defined(__powerpc__) + +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv) +// +// void libunwind::Registers_ppc::jumpto() +// +// On entry: +// thread_state pointer is in r3 +// + + // restore integral registers + // skip r0 for now + // skip r1 for now + lwz 2, 16(3) + // skip r3 for now + // skip r4 for now + // skip r5 for now + lwz 6, 32(3) + lwz 7, 36(3) + lwz 8, 40(3) + lwz 9, 44(3) + lwz 10, 48(3) + lwz 11, 52(3) + lwz 12, 56(3) + lwz 13, 60(3) + lwz 14, 64(3) + lwz 15, 68(3) + lwz 16, 72(3) + lwz 17, 76(3) + lwz 18, 80(3) + lwz 19, 84(3) + lwz 20, 88(3) + lwz 21, 92(3) + lwz 22, 96(3) + lwz 23,100(3) + lwz 24,104(3) + lwz 25,108(3) + lwz 26,112(3) + lwz 27,116(3) + lwz 28,120(3) + lwz 29,124(3) + lwz 30,128(3) + lwz 31,132(3) + +#ifndef __NO_FPRS__ + // restore float registers + lfd 0, 160(3) + lfd 1, 168(3) + lfd 2, 176(3) + lfd 3, 184(3) + lfd 4, 192(3) + lfd 5, 200(3) + lfd 6, 208(3) + lfd 7, 216(3) + lfd 8, 224(3) + lfd 9, 232(3) + lfd 10,240(3) + lfd 11,248(3) + lfd 12,256(3) + lfd 13,264(3) + lfd 14,272(3) + lfd 15,280(3) + lfd 16,288(3) + lfd 17,296(3) + lfd 18,304(3) + lfd 19,312(3) + lfd 20,320(3) + lfd 21,328(3) + lfd 22,336(3) + lfd 23,344(3) + lfd 24,352(3) + lfd 25,360(3) + lfd 26,368(3) + lfd 27,376(3) + lfd 28,384(3) + lfd 29,392(3) + lfd 30,400(3) + lfd 31,408(3) +#endif + +#if defined(__ALTIVEC__) + +#define LOAD_VECTOR_RESTORE(_index) \ + lwz 0, 424+_index*16(3) SEPARATOR \ + stw 0, 0(4) SEPARATOR \ + lwz 0, 424+_index*16+4(3) SEPARATOR \ + stw 0, 4(4) SEPARATOR \ + lwz 0, 424+_index*16+8(3) SEPARATOR \ + stw 0, 8(4) SEPARATOR \ + lwz 0, 424+_index*16+12(3) SEPARATOR \ + stw 0, 12(4) SEPARATOR \ + lvx 
_index, 0, 4 + +#if !defined(_AIX) + // restore vector registers if any are in use. In the AIX ABI, VRSAVE + // is not used. + lwz 5, 156(3) // test VRsave + cmpwi 5, 0 + beq Lnovec + +#define LOAD_VECTOR_UNALIGNEDl(_index) \ + andis. 0, 5, (1 PPC_LEFT_SHIFT(15-_index)) SEPARATOR \ + beq Ldone ## _index SEPARATOR \ + LOAD_VECTOR_RESTORE(_index) SEPARATOR \ + Ldone ## _index: + +#define LOAD_VECTOR_UNALIGNEDh(_index) \ + andi. 0, 5, (1 PPC_LEFT_SHIFT(31-_index)) SEPARATOR \ + beq Ldone ## _index SEPARATOR \ + LOAD_VECTOR_RESTORE(_index) SEPARATOR \ + Ldone ## _index: + +#else + +#define LOAD_VECTOR_UNALIGNEDl(_index) LOAD_VECTOR_RESTORE(_index) +#define LOAD_VECTOR_UNALIGNEDh(_index) LOAD_VECTOR_RESTORE(_index) + +#endif // !defined(_AIX) + + subi 4, 1, 16 + rlwinm 4, 4, 0, 0, 27 // mask low 4-bits + // r4 is now a 16-byte aligned pointer into the red zone + // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer + + LOAD_VECTOR_UNALIGNEDl(0) + LOAD_VECTOR_UNALIGNEDl(1) + LOAD_VECTOR_UNALIGNEDl(2) + LOAD_VECTOR_UNALIGNEDl(3) + LOAD_VECTOR_UNALIGNEDl(4) + LOAD_VECTOR_UNALIGNEDl(5) + LOAD_VECTOR_UNALIGNEDl(6) + LOAD_VECTOR_UNALIGNEDl(7) + LOAD_VECTOR_UNALIGNEDl(8) + LOAD_VECTOR_UNALIGNEDl(9) + LOAD_VECTOR_UNALIGNEDl(10) + LOAD_VECTOR_UNALIGNEDl(11) + LOAD_VECTOR_UNALIGNEDl(12) + LOAD_VECTOR_UNALIGNEDl(13) + LOAD_VECTOR_UNALIGNEDl(14) + LOAD_VECTOR_UNALIGNEDl(15) + LOAD_VECTOR_UNALIGNEDh(16) + LOAD_VECTOR_UNALIGNEDh(17) + LOAD_VECTOR_UNALIGNEDh(18) + LOAD_VECTOR_UNALIGNEDh(19) + LOAD_VECTOR_UNALIGNEDh(20) + LOAD_VECTOR_UNALIGNEDh(21) + LOAD_VECTOR_UNALIGNEDh(22) + LOAD_VECTOR_UNALIGNEDh(23) + LOAD_VECTOR_UNALIGNEDh(24) + LOAD_VECTOR_UNALIGNEDh(25) + LOAD_VECTOR_UNALIGNEDh(26) + LOAD_VECTOR_UNALIGNEDh(27) + LOAD_VECTOR_UNALIGNEDh(28) + LOAD_VECTOR_UNALIGNEDh(29) + LOAD_VECTOR_UNALIGNEDh(30) + LOAD_VECTOR_UNALIGNEDh(31) +#endif + +Lnovec: + lwz 0, 136(3) // __cr + mtcr 0 + lwz 0, 148(3) // __ctr + mtctr 0 + lwz 0, 0(3) // __ssr0 + mtctr 0 + lwz 0, 8(3) // do r0 now + lwz 5, 28(3) // do r5 now + lwz 4, 24(3) // do r4 now + lwz 1, 12(3) // do sp now + lwz 3, 20(3) // do r3 last + bctr + +#elif defined(__aarch64__) + +#if defined(__ARM_FEATURE_GCS_DEFAULT) +.arch_extension gcs +#endif + +// +// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *); +// +// On entry: +// thread_state pointer is in x0 +// + .p2align 2 +DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto) + // skip restore of x0,x1 for now + ldp x2, x3, [x0, #0x010] + ldp x4, x5, [x0, #0x020] + ldp x6, x7, [x0, #0x030] + ldp x8, x9, [x0, #0x040] + ldp x10,x11, [x0, #0x050] + ldp x12,x13, [x0, #0x060] + ldp x14,x15, [x0, #0x070] + // x16 and x17 were clobbered by the call into the unwinder, so no point in + // restoring them. + ldp x18,x19, [x0, #0x090] + ldp x20,x21, [x0, #0x0A0] + ldp x22,x23, [x0, #0x0B0] + ldp x24,x25, [x0, #0x0C0] + ldp x26,x27, [x0, #0x0D0] + ldp x28,x29, [x0, #0x0E0] + ldr x30, [x0, #0x100] // restore pc into lr + + ldp d0, d1, [x0, #0x110] + ldp d2, d3, [x0, #0x120] + ldp d4, d5, [x0, #0x130] + ldp d6, d7, [x0, #0x140] + ldp d8, d9, [x0, #0x150] + ldp d10,d11, [x0, #0x160] + ldp d12,d13, [x0, #0x170] + ldp d14,d15, [x0, #0x180] + ldp d16,d17, [x0, #0x190] + ldp d18,d19, [x0, #0x1A0] + ldp d20,d21, [x0, #0x1B0] + ldp d22,d23, [x0, #0x1C0] + ldp d24,d25, [x0, #0x1D0] + ldp d26,d27, [x0, #0x1E0] + ldp d28,d29, [x0, #0x1F0] + ldr d30, [x0, #0x200] + ldr d31, [x0, #0x208] + + // Finally, restore sp. 
This must be done after the last read from the + context struct, because it is allocated on the stack, and an exception + could clobber the de-allocated portion of the stack after sp has been + restored. + ldr x16, [x0, #0x0F8] + ldp x0, x1, [x0, #0x000] // restore x0,x1 + mov sp,x16 // restore sp +#if defined(__ARM_FEATURE_GCS_DEFAULT) + // If GCS is enabled we need to push the address we're returning to onto the + // GCS stack. We can't just return using br, as there won't be a BTI landing + // pad instruction at the destination. + mov x16, #1 + chkfeat x16 + cbnz x16, Lnogcs + gcspushm x30 +Lnogcs: +#endif + ret x30 // jump to pc + +#elif defined(__arm__) && !defined(__APPLE__) + +#if !defined(__ARM_ARCH_ISA_ARM) +#if (__ARM_ARCH_ISA_THUMB == 2) + .syntax unified +#endif + .thumb +#endif + +@ +@ void libunwind::Registers_arm::restoreCoreAndJumpTo() +@ +@ On entry: +@ thread_state pointer is in r0 +@ + .p2align 2 +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv) +#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1 + @ r8-r11: ldm into r1-r4, then mov to r8-r11 + adds r0, #0x20 + ldm r0!, {r1-r4} + subs r0, #0x30 + mov r8, r1 + mov r9, r2 + mov r10, r3 + mov r11, r4 + @ r12 does not need loading, it is the intra-procedure-call scratch register + ldr r2, [r0, #0x34] + ldr r3, [r0, #0x3c] + mov sp, r2 + mov lr, r3 @ restore pc into lr + ldm r0, {r0-r7} +#else + @ Use lr as base so that r0 can be restored. + mov lr, r0 + @ 32bit thumb-2 restrictions for ldm: + @ . the sp (r13) cannot be in the list + @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction + ldm lr, {r0-r12} + ldr sp, [lr, #52] + ldr lr, [lr, #60] @ restore pc into lr +#endif +#if defined(__ARM_FEATURE_BTI_DEFAULT) && !defined(__ARM_ARCH_ISA_ARM) + // 'bx' is not BTI setting when used with lr, therefore r12 is used instead + mov r12, lr + JMP(r12) +#else + JMP(lr) +#endif + +@ +@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .fpu vfpv3-d16 +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv) + @ VFP and iwMMX instructions are only available when compiling with the flags + @ that enable them. We do not want to do that in the library (because we do not + @ want the compiler to generate instructions that access those) but this is + @ only accessed if the personality routine needs these registers. Use of + @ these registers implies they are, actually, available on the target, so + @ it's ok to execute. + @ So, generate the instruction using the corresponding coprocessor mnemonic. 
+ vldmia r0, {d0-d15} + JMP(lr) + +@ +@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .fpu vfpv3-d16 +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv) + vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia + JMP(lr) + +@ +@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .fpu vfpv3 +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv) + vldmia r0, {d16-d31} + JMP(lr) + +#if defined(__ARM_WMMX) + +@ +@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .arch armv5te +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv) + ldcl p1, cr0, [r0], #8 @ wldrd wR0, [r0], #8 + ldcl p1, cr1, [r0], #8 @ wldrd wR1, [r0], #8 + ldcl p1, cr2, [r0], #8 @ wldrd wR2, [r0], #8 + ldcl p1, cr3, [r0], #8 @ wldrd wR3, [r0], #8 + ldcl p1, cr4, [r0], #8 @ wldrd wR4, [r0], #8 + ldcl p1, cr5, [r0], #8 @ wldrd wR5, [r0], #8 + ldcl p1, cr6, [r0], #8 @ wldrd wR6, [r0], #8 + ldcl p1, cr7, [r0], #8 @ wldrd wR7, [r0], #8 + ldcl p1, cr8, [r0], #8 @ wldrd wR8, [r0], #8 + ldcl p1, cr9, [r0], #8 @ wldrd wR9, [r0], #8 + ldcl p1, cr10, [r0], #8 @ wldrd wR10, [r0], #8 + ldcl p1, cr11, [r0], #8 @ wldrd wR11, [r0], #8 + ldcl p1, cr12, [r0], #8 @ wldrd wR12, [r0], #8 + ldcl p1, cr13, [r0], #8 @ wldrd wR13, [r0], #8 + ldcl p1, cr14, [r0], #8 @ wldrd wR14, [r0], #8 + ldcl p1, cr15, [r0], #8 @ wldrd wR15, [r0], #8 + JMP(lr) + +@ +@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .arch armv5te +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj) + ldc2 p1, cr8, [r0], #4 @ wldrw wCGR0, [r0], #4 + ldc2 p1, cr9, [r0], #4 @ wldrw wCGR1, [r0], #4 + ldc2 p1, cr10, [r0], #4 @ wldrw wCGR2, [r0], #4 + ldc2 p1, cr11, [r0], #4 @ wldrw wCGR3, [r0], #4 + JMP(lr) + +#endif + +#elif defined(__or1k__) + +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv) +# +# void libunwind::Registers_or1k::jumpto() +# +# On entry: +# thread_state pointer is in r3 +# + + # restore integral registers + l.lwz r0, 0(r3) + l.lwz r1, 4(r3) + l.lwz r2, 8(r3) + # skip r3 for now + l.lwz r4, 16(r3) + l.lwz r5, 20(r3) + l.lwz r6, 24(r3) + l.lwz r7, 28(r3) + l.lwz r8, 32(r3) + # skip r9 + l.lwz r10, 40(r3) + l.lwz r11, 44(r3) + l.lwz r12, 48(r3) + l.lwz r13, 52(r3) + l.lwz r14, 56(r3) + l.lwz r15, 60(r3) + l.lwz r16, 64(r3) + l.lwz r17, 68(r3) + l.lwz r18, 72(r3) + l.lwz r19, 76(r3) + l.lwz r20, 80(r3) + l.lwz r21, 84(r3) + l.lwz r22, 88(r3) + l.lwz r23, 92(r3) + l.lwz r24, 96(r3) + l.lwz r25,100(r3) + l.lwz r26,104(r3) + l.lwz r27,108(r3) + l.lwz r28,112(r3) + l.lwz r29,116(r3) + l.lwz r30,120(r3) + l.lwz r31,124(r3) + + # load new pc into ra + l.lwz r9, 128(r3) + + # at last, restore r3 + l.lwz r3, 12(r3) + + # jump to pc + l.jr r9 + l.nop + +#elif defined(__hexagon__) +# On entry: +# thread_state pointer is in r2 +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv) +# +# void libunwind::Registers_hexagon::jumpto() +# + r8 = memw(r0+#32) + r9 = memw(r0+#36) + r10 = memw(r0+#40) + r11 = memw(r0+#44) + + r12 = memw(r0+#48) + r13 = memw(r0+#52) + r14 = 
memw(r0+#56) + r15 = memw(r0+#60) + + r16 = memw(r0+#64) + r17 = memw(r0+#68) + r18 = memw(r0+#72) + r19 = memw(r0+#76) + + r20 = memw(r0+#80) + r21 = memw(r0+#84) + r22 = memw(r0+#88) + r23 = memw(r0+#92) + + r24 = memw(r0+#96) + r25 = memw(r0+#100) + r26 = memw(r0+#104) + r27 = memw(r0+#108) + + r28 = memw(r0+#112) + r29 = memw(r0+#116) + r30 = memw(r0+#120) + r31 = memw(r0+#132) + + r1 = memw(r0+#128) + c4 = r1 // Predicate register + r1 = memw(r0+#4) + r0 = memw(r0) + jumpr r31 +#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32 + +// +// void libunwind::Registers_mips_o32::jumpto() +// +// On entry: +// thread state pointer is in a0 ($4) +// +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv) + .set push + .set noat + .set noreorder + .set nomacro +#ifdef __mips_hard_float +#if __mips_fpr != 64 + ldc1 $f0, (4 * 36 + 8 * 0)($4) + ldc1 $f2, (4 * 36 + 8 * 2)($4) + ldc1 $f4, (4 * 36 + 8 * 4)($4) + ldc1 $f6, (4 * 36 + 8 * 6)($4) + ldc1 $f8, (4 * 36 + 8 * 8)($4) + ldc1 $f10, (4 * 36 + 8 * 10)($4) + ldc1 $f12, (4 * 36 + 8 * 12)($4) + ldc1 $f14, (4 * 36 + 8 * 14)($4) + ldc1 $f16, (4 * 36 + 8 * 16)($4) + ldc1 $f18, (4 * 36 + 8 * 18)($4) + ldc1 $f20, (4 * 36 + 8 * 20)($4) + ldc1 $f22, (4 * 36 + 8 * 22)($4) + ldc1 $f24, (4 * 36 + 8 * 24)($4) + ldc1 $f26, (4 * 36 + 8 * 26)($4) + ldc1 $f28, (4 * 36 + 8 * 28)($4) + ldc1 $f30, (4 * 36 + 8 * 30)($4) +#else + ldc1 $f0, (4 * 36 + 8 * 0)($4) + ldc1 $f1, (4 * 36 + 8 * 1)($4) + ldc1 $f2, (4 * 36 + 8 * 2)($4) + ldc1 $f3, (4 * 36 + 8 * 3)($4) + ldc1 $f4, (4 * 36 + 8 * 4)($4) + ldc1 $f5, (4 * 36 + 8 * 5)($4) + ldc1 $f6, (4 * 36 + 8 * 6)($4) + ldc1 $f7, (4 * 36 + 8 * 7)($4) + ldc1 $f8, (4 * 36 + 8 * 8)($4) + ldc1 $f9, (4 * 36 + 8 * 9)($4) + ldc1 $f10, (4 * 36 + 8 * 10)($4) + ldc1 $f11, (4 * 36 + 8 * 11)($4) + ldc1 $f12, (4 * 36 + 8 * 12)($4) + ldc1 $f13, (4 * 36 + 8 * 13)($4) + ldc1 $f14, (4 * 36 + 8 * 14)($4) + ldc1 $f15, (4 * 36 + 8 * 15)($4) + ldc1 $f16, (4 * 36 + 8 * 16)($4) + ldc1 $f17, (4 * 36 + 8 * 17)($4) + ldc1 $f18, (4 * 36 + 8 * 18)($4) + ldc1 $f19, (4 * 36 + 8 * 19)($4) + ldc1 $f20, (4 * 36 + 8 * 20)($4) + ldc1 $f21, (4 * 36 + 8 * 21)($4) + ldc1 $f22, (4 * 36 + 8 * 22)($4) + ldc1 $f23, (4 * 36 + 8 * 23)($4) + ldc1 $f24, (4 * 36 + 8 * 24)($4) + ldc1 $f25, (4 * 36 + 8 * 25)($4) + ldc1 $f26, (4 * 36 + 8 * 26)($4) + ldc1 $f27, (4 * 36 + 8 * 27)($4) + ldc1 $f28, (4 * 36 + 8 * 28)($4) + ldc1 $f29, (4 * 36 + 8 * 29)($4) + ldc1 $f30, (4 * 36 + 8 * 30)($4) + ldc1 $f31, (4 * 36 + 8 * 31)($4) +#endif +#endif +#if __mips_isa_rev < 6 + // restore hi and lo + lw $8, (4 * 33)($4) + mthi $8 + lw $8, (4 * 34)($4) + mtlo $8 +#endif + // r0 is zero + lw $1, (4 * 1)($4) + lw $2, (4 * 2)($4) + lw $3, (4 * 3)($4) + // skip a0 for now + lw $5, (4 * 5)($4) + lw $6, (4 * 6)($4) + lw $7, (4 * 7)($4) + lw $8, (4 * 8)($4) + lw $9, (4 * 9)($4) + lw $10, (4 * 10)($4) + lw $11, (4 * 11)($4) + lw $12, (4 * 12)($4) + lw $13, (4 * 13)($4) + lw $14, (4 * 14)($4) + lw $15, (4 * 15)($4) + lw $16, (4 * 16)($4) + lw $17, (4 * 17)($4) + lw $18, (4 * 18)($4) + lw $19, (4 * 19)($4) + lw $20, (4 * 20)($4) + lw $21, (4 * 21)($4) + lw $22, (4 * 22)($4) + lw $23, (4 * 23)($4) + lw $24, (4 * 24)($4) + lw $25, (4 * 25)($4) + lw $26, (4 * 26)($4) + lw $27, (4 * 27)($4) + lw $28, (4 * 28)($4) + lw $29, (4 * 29)($4) + lw $30, (4 * 30)($4) + // load new pc into ra + lw $31, (4 * 32)($4) + // jump to ra, load a0 in the delay slot + jr $31 + lw $4, (4 * 4)($4) + .set pop + +#elif defined(__mips64) + +// +// void libunwind::Registers_mips_newabi::jumpto() +// +// On 
entry: +// thread state pointer is in a0 ($4) +// +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv) + .set push + .set noat + .set noreorder + .set nomacro +#ifdef __mips_hard_float + .irp i,FROM_0_TO_31 + ldc1 $f\i, (280+8*\i)($4) + .endr +#endif +#if __mips_isa_rev < 6 + // restore hi and lo + ld $8, (8 * 33)($4) + mthi $8 + ld $8, (8 * 34)($4) + mtlo $8 +#endif + // r0 is zero + ld $1, (8 * 1)($4) + ld $2, (8 * 2)($4) + ld $3, (8 * 3)($4) + // skip a0 for now + .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 + ld $\i, (8 * \i)($4) + .endr + // load new pc into ra + ld $31, (8 * 32)($4) + // jump to ra, load a0 in the delay slot + jr $31 + ld $4, (8 * 4)($4) + .set pop + +#elif defined(__sparc__) && defined(__arch64__) + +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_sparc646jumptoEv) +// +// void libunwind::Registers_sparc64::jumpto() +// +// On entry: +// thread_state pointer is in %o0 +// + .register %g2, #scratch + .register %g3, #scratch + .register %g6, #scratch + .register %g7, #scratch + flushw + ldx [%o0 + 0x08], %g1 + ldx [%o0 + 0x10], %g2 + ldx [%o0 + 0x18], %g3 + ldx [%o0 + 0x20], %g4 + ldx [%o0 + 0x28], %g5 + ldx [%o0 + 0x30], %g6 + ldx [%o0 + 0x38], %g7 + ldx [%o0 + 0x48], %o1 + ldx [%o0 + 0x50], %o2 + ldx [%o0 + 0x58], %o3 + ldx [%o0 + 0x60], %o4 + ldx [%o0 + 0x68], %o5 + ldx [%o0 + 0x70], %o6 + ldx [%o0 + 0x78], %o7 + ldx [%o0 + 0x80], %l0 + ldx [%o0 + 0x88], %l1 + ldx [%o0 + 0x90], %l2 + ldx [%o0 + 0x98], %l3 + ldx [%o0 + 0xa0], %l4 + ldx [%o0 + 0xa8], %l5 + ldx [%o0 + 0xb0], %l6 + ldx [%o0 + 0xb8], %l7 + ldx [%o0 + 0xc0], %i0 + ldx [%o0 + 0xc8], %i1 + ldx [%o0 + 0xd0], %i2 + ldx [%o0 + 0xd8], %i3 + ldx [%o0 + 0xe0], %i4 + ldx [%o0 + 0xe8], %i5 + ldx [%o0 + 0xf0], %i6 + ldx [%o0 + 0xf8], %i7 + jmp %o7 + ldx [%o0 + 0x40], %o0 + +#elif defined(__sparc__) + +// +// void libunwind::Registers_sparc_o32::jumpto() +// +// On entry: +// thread_state pointer is in o0 +// +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv) + ta 3 + ldd [%o0 + 64], %l0 + ldd [%o0 + 72], %l2 + ldd [%o0 + 80], %l4 + ldd [%o0 + 88], %l6 + ldd [%o0 + 96], %i0 + ldd [%o0 + 104], %i2 + ldd [%o0 + 112], %i4 + ldd [%o0 + 120], %i6 + ld [%o0 + 60], %o7 + jmp %o7 + nop + +#elif defined(__riscv) + +// +// void libunwind::Registers_riscv::jumpto() +// +// On entry: +// thread_state pointer is in a0 +// + .p2align 2 +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv) +# if defined(__riscv_flen) + .irp i,FROM_0_TO_31 + FLOAD f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0) + .endr +# endif + + // x0 is zero + ILOAD x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra + .irp i,2,3,4,5,6,7,8,9 + ILOAD x\i, (RISCV_ISIZE * \i)(a0) + .endr + // skip a0 for now +#if defined(__riscv_32e) + .irp i,11,12,13,14,15 +#else + .irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +#endif + ILOAD x\i, (RISCV_ISIZE * \i)(a0) + .endr + ILOAD x10, (RISCV_ISIZE * 10)(a0) // restore a0 + + ret // jump to ra + +#elif defined(__s390x__) + +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv) +// +// void libunwind::Registers_s390x::jumpto() +// +// On entry: +// thread_state pointer is in r2 +// + + // Skip PSWM, but load PSWA into r1 + lg %r1, 8(%r2) + + // Restore FPRs + .irp i,FROM_0_TO_15 + ld %f\i, (144+8*\i)(%r2) + .endr + + // Restore GPRs - skipping %r0 and %r1 + lmg %r2, %r15, 32(%r2) + + // Return to PSWA (was loaded into %r1 above) + br %r1 + +#elif defined(__loongarch__) && __loongarch_grlen == 64 + +// +// 
void libunwind::Registers_loongarch::jumpto() +// +// On entry: +// thread_state pointer is in $a0($r4) +// + .p2align 2 +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv) +# if __loongarch_frlen == 64 + .irp i,FROM_0_TO_31 + fld.d $f\i, $a0, (8 * 33 + 8 * \i) + .endr +# endif + + // $r0 is zero + .irp i,1,2,3 + ld.d $r\i, $a0, (8 * \i) + .endr + // skip $a0 for now + .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + ld.d $r\i, $a0, (8 * \i) + .endr + + ld.d $ra, $a0, (8 * 32) // load new pc into $ra + ld.d $a0, $a0, (8 * 4) // restore $a0 last + + jr $ra + +#endif + +#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */ + +NO_EXEC_STACK_DIRECTIVE + diff --git a/third_party/libunwind/UnwindRegistersSave.S b/third_party/libunwind/UnwindRegistersSave.S new file mode 100644 index 000000000..fab234fcd --- /dev/null +++ b/third_party/libunwind/UnwindRegistersSave.S @@ -0,0 +1,1186 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "assembly.h" + +#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + +#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63 + +#if defined(_AIX) + .toc +#else + .text +#endif + +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) + +#if defined(__i386__) + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# + + +# +-----------------------+ +# + thread_state pointer + +# +-----------------------+ +# + return address + +# +-----------------------+ <-- SP +# + + +# +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + + _LIBUNWIND_CET_ENDBR + push %eax + movl 8(%esp), %eax + movl %ebx, 4(%eax) + movl %ecx, 8(%eax) + movl %edx, 12(%eax) + movl %edi, 16(%eax) + movl %esi, 20(%eax) + movl %ebp, 24(%eax) + movl %esp, %edx + addl $8, %edx + movl %edx, 28(%eax) # store what sp was at call site as esp + # skip ss + # skip eflags + movl 4(%esp), %edx + movl %edx, 40(%eax) # store return address as eip + # skip cs + # skip ds + # skip es + # skip fs + # skip gs + movl (%esp), %edx + movl %edx, (%eax) # store original eax + popl %eax + xorl %eax, %eax # return UNW_ESUCCESS + ret + +#elif defined(__x86_64__) + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# thread_state pointer is in rdi +# +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) +#if defined(_WIN64) +#define PTR %rcx +#define TMP %rdx +#else +#define PTR %rdi +#define TMP %rsi +#endif + + _LIBUNWIND_CET_ENDBR + movq %rax, (PTR) + movq %rbx, 8(PTR) + movq %rcx, 16(PTR) + movq %rdx, 24(PTR) + movq %rdi, 32(PTR) + movq %rsi, 40(PTR) + movq %rbp, 48(PTR) + movq %rsp, 56(PTR) + addq $8, 56(PTR) + movq %r8, 64(PTR) + movq %r9, 72(PTR) + movq %r10, 80(PTR) + movq %r11, 88(PTR) + movq %r12, 96(PTR) + movq %r13,104(PTR) + movq %r14,112(PTR) + movq %r15,120(PTR) + movq (%rsp),TMP + movq TMP,128(PTR) # store return address as rip + # skip rflags + # skip cs + # skip fs + # skip gs + +#if defined(_WIN64) + movdqu 
%xmm0,176(PTR) + movdqu %xmm1,192(PTR) + movdqu %xmm2,208(PTR) + movdqu %xmm3,224(PTR) + movdqu %xmm4,240(PTR) + movdqu %xmm5,256(PTR) + movdqu %xmm6,272(PTR) + movdqu %xmm7,288(PTR) + movdqu %xmm8,304(PTR) + movdqu %xmm9,320(PTR) + movdqu %xmm10,336(PTR) + movdqu %xmm11,352(PTR) + movdqu %xmm12,368(PTR) + movdqu %xmm13,384(PTR) + movdqu %xmm14,400(PTR) + movdqu %xmm15,416(PTR) +#endif + xorl %eax, %eax # return UNW_ESUCCESS + ret + +#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32 + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# thread_state pointer is in a0 ($4) +# +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + .set push + .set noat + .set noreorder + .set nomacro + sw $1, (4 * 1)($4) + sw $2, (4 * 2)($4) + sw $3, (4 * 3)($4) + sw $4, (4 * 4)($4) + sw $5, (4 * 5)($4) + sw $6, (4 * 6)($4) + sw $7, (4 * 7)($4) + sw $8, (4 * 8)($4) + sw $9, (4 * 9)($4) + sw $10, (4 * 10)($4) + sw $11, (4 * 11)($4) + sw $12, (4 * 12)($4) + sw $13, (4 * 13)($4) + sw $14, (4 * 14)($4) + sw $15, (4 * 15)($4) + sw $16, (4 * 16)($4) + sw $17, (4 * 17)($4) + sw $18, (4 * 18)($4) + sw $19, (4 * 19)($4) + sw $20, (4 * 20)($4) + sw $21, (4 * 21)($4) + sw $22, (4 * 22)($4) + sw $23, (4 * 23)($4) + sw $24, (4 * 24)($4) + sw $25, (4 * 25)($4) + sw $26, (4 * 26)($4) + sw $27, (4 * 27)($4) + sw $28, (4 * 28)($4) + sw $29, (4 * 29)($4) + sw $30, (4 * 30)($4) + sw $31, (4 * 31)($4) + # Store return address to pc + sw $31, (4 * 32)($4) +#if __mips_isa_rev < 6 + # hi and lo + mfhi $8 + sw $8, (4 * 33)($4) + mflo $8 + sw $8, (4 * 34)($4) +#endif +#ifdef __mips_hard_float +#if __mips_fpr != 64 + sdc1 $f0, (4 * 36 + 8 * 0)($4) + sdc1 $f2, (4 * 36 + 8 * 2)($4) + sdc1 $f4, (4 * 36 + 8 * 4)($4) + sdc1 $f6, (4 * 36 + 8 * 6)($4) + sdc1 $f8, (4 * 36 + 8 * 8)($4) + sdc1 $f10, (4 * 36 + 8 * 10)($4) + sdc1 $f12, (4 * 36 + 8 * 12)($4) + sdc1 $f14, (4 * 36 + 8 * 14)($4) + sdc1 $f16, (4 * 36 + 8 * 16)($4) + sdc1 $f18, (4 * 36 + 8 * 18)($4) + sdc1 $f20, (4 * 36 + 8 * 20)($4) + sdc1 $f22, (4 * 36 + 8 * 22)($4) + sdc1 $f24, (4 * 36 + 8 * 24)($4) + sdc1 $f26, (4 * 36 + 8 * 26)($4) + sdc1 $f28, (4 * 36 + 8 * 28)($4) + sdc1 $f30, (4 * 36 + 8 * 30)($4) +#else + sdc1 $f0, (4 * 36 + 8 * 0)($4) + sdc1 $f1, (4 * 36 + 8 * 1)($4) + sdc1 $f2, (4 * 36 + 8 * 2)($4) + sdc1 $f3, (4 * 36 + 8 * 3)($4) + sdc1 $f4, (4 * 36 + 8 * 4)($4) + sdc1 $f5, (4 * 36 + 8 * 5)($4) + sdc1 $f6, (4 * 36 + 8 * 6)($4) + sdc1 $f7, (4 * 36 + 8 * 7)($4) + sdc1 $f8, (4 * 36 + 8 * 8)($4) + sdc1 $f9, (4 * 36 + 8 * 9)($4) + sdc1 $f10, (4 * 36 + 8 * 10)($4) + sdc1 $f11, (4 * 36 + 8 * 11)($4) + sdc1 $f12, (4 * 36 + 8 * 12)($4) + sdc1 $f13, (4 * 36 + 8 * 13)($4) + sdc1 $f14, (4 * 36 + 8 * 14)($4) + sdc1 $f15, (4 * 36 + 8 * 15)($4) + sdc1 $f16, (4 * 36 + 8 * 16)($4) + sdc1 $f17, (4 * 36 + 8 * 17)($4) + sdc1 $f18, (4 * 36 + 8 * 18)($4) + sdc1 $f19, (4 * 36 + 8 * 19)($4) + sdc1 $f20, (4 * 36 + 8 * 20)($4) + sdc1 $f21, (4 * 36 + 8 * 21)($4) + sdc1 $f22, (4 * 36 + 8 * 22)($4) + sdc1 $f23, (4 * 36 + 8 * 23)($4) + sdc1 $f24, (4 * 36 + 8 * 24)($4) + sdc1 $f25, (4 * 36 + 8 * 25)($4) + sdc1 $f26, (4 * 36 + 8 * 26)($4) + sdc1 $f27, (4 * 36 + 8 * 27)($4) + sdc1 $f28, (4 * 36 + 8 * 28)($4) + sdc1 $f29, (4 * 36 + 8 * 29)($4) + sdc1 $f30, (4 * 36 + 8 * 30)($4) + sdc1 $f31, (4 * 36 + 8 * 31)($4) +#endif +#endif + jr $31 + # return UNW_ESUCCESS + or $2, $0, $0 + .set pop + +#elif defined(__mips64) + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# thread_state pointer is in a0 ($4) +# 
+DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + .set push + .set noat + .set noreorder + .set nomacro + .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + sd $\i, (8 * \i)($4) + .endr + # Store return address to pc + sd $31, (8 * 32)($4) +#if __mips_isa_rev < 6 + # hi and lo + mfhi $8 + sd $8, (8 * 33)($4) + mflo $8 + sd $8, (8 * 34)($4) +#endif +#ifdef __mips_hard_float + .irp i,FROM_0_TO_31 + sdc1 $f\i, (280+8*\i)($4) + .endr +#endif + jr $31 + # return UNW_ESUCCESS + or $2, $0, $0 + .set pop + +# elif defined(__mips__) + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# Just trap for the time being. +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + teq $0, $0 + +#elif defined(__powerpc64__) + +// +// extern int __unw_getcontext(unw_context_t* thread_state) +// +// On entry: +// thread_state pointer is in r3 +// +#if defined(_AIX) +DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext) +#else +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) +#endif +// store register (GPR) +#define PPC64_STR(n) \ + std n, (8 * (n + 2))(3) + + // save GPRs + PPC64_STR(0) + mflr 0 + std 0, PPC64_OFFS_SRR0(3) // store lr as ssr0 + PPC64_STR(1) + PPC64_STR(4) // Save r4 first since it will be used for fixing r2. +#if defined(_AIX) + // The TOC register (r2) was changed by the glue code if unw_getcontext + // is called from a different module. Save the original TOC register + // in the context if this is the case. + mflr 4 + lwz 4, 0(4) // Get the first instruction at the return address. + xoris 0, 4, 0xe841 // Is it reloading the TOC register "ld 2,40(1)"? + cmplwi 0, 0x28 + bne 0, LnoR2Fix // No need to fix up r2 if it is not. + ld 2, 40(1) // Use the saved TOC register in the stack. +LnoR2Fix: +#endif + PPC64_STR(2) + PPC64_STR(3) + PPC64_STR(5) + PPC64_STR(6) + PPC64_STR(7) + PPC64_STR(8) + PPC64_STR(9) + PPC64_STR(10) + PPC64_STR(11) + PPC64_STR(12) + PPC64_STR(13) + PPC64_STR(14) + PPC64_STR(15) + PPC64_STR(16) + PPC64_STR(17) + PPC64_STR(18) + PPC64_STR(19) + PPC64_STR(20) + PPC64_STR(21) + PPC64_STR(22) + PPC64_STR(23) + PPC64_STR(24) + PPC64_STR(25) + PPC64_STR(26) + PPC64_STR(27) + PPC64_STR(28) + PPC64_STR(29) + PPC64_STR(30) + PPC64_STR(31) + + mfcr 0 + std 0, PPC64_OFFS_CR(3) + mfxer 0 + std 0, PPC64_OFFS_XER(3) +#if defined(_AIX) + // LR value saved from the register is not used, initialize it to 0. + li 0, 0 +#else + mflr 0 +#endif + std 0, PPC64_OFFS_LR(3) + mfctr 0 + std 0, PPC64_OFFS_CTR(3) + mfvrsave 0 + std 0, PPC64_OFFS_VRSAVE(3) + +#if defined(__VSX__) + // save VS registers + // (note that this also saves floating point registers and V registers, + // because part of VS is mapped to these registers) + + addi 4, 3, PPC64_OFFS_FP + +// store VS register +#ifdef __LITTLE_ENDIAN__ +// For little-endian targets, we need a swap since stxvd2x will store the +// register in the incorrect doubleword order. +// FIXME: when supporting targets older than Power9 on LE is no longer required +// this can be changed to simply `stxv n, 16 * n(4)`. 
+#define PPC64_STVS(n) \ + xxswapd n, n ;\ + stxvd2x n, 0, 4 ;\ + addi 4, 4, 16 +#else +#define PPC64_STVS(n) \ + stxvd2x n, 0, 4 ;\ + addi 4, 4, 16 +#endif + + PPC64_STVS(0) + PPC64_STVS(1) + PPC64_STVS(2) + PPC64_STVS(3) + PPC64_STVS(4) + PPC64_STVS(5) + PPC64_STVS(6) + PPC64_STVS(7) + PPC64_STVS(8) + PPC64_STVS(9) + PPC64_STVS(10) + PPC64_STVS(11) + PPC64_STVS(12) + PPC64_STVS(13) + PPC64_STVS(14) + PPC64_STVS(15) + PPC64_STVS(16) + PPC64_STVS(17) + PPC64_STVS(18) + PPC64_STVS(19) + PPC64_STVS(20) + PPC64_STVS(21) + PPC64_STVS(22) + PPC64_STVS(23) + PPC64_STVS(24) + PPC64_STVS(25) + PPC64_STVS(26) + PPC64_STVS(27) + PPC64_STVS(28) + PPC64_STVS(29) + PPC64_STVS(30) + PPC64_STVS(31) + PPC64_STVS(32) + PPC64_STVS(33) + PPC64_STVS(34) + PPC64_STVS(35) + PPC64_STVS(36) + PPC64_STVS(37) + PPC64_STVS(38) + PPC64_STVS(39) + PPC64_STVS(40) + PPC64_STVS(41) + PPC64_STVS(42) + PPC64_STVS(43) + PPC64_STVS(44) + PPC64_STVS(45) + PPC64_STVS(46) + PPC64_STVS(47) + PPC64_STVS(48) + PPC64_STVS(49) + PPC64_STVS(50) + PPC64_STVS(51) + PPC64_STVS(52) + PPC64_STVS(53) + PPC64_STVS(54) + PPC64_STVS(55) + PPC64_STVS(56) + PPC64_STVS(57) + PPC64_STVS(58) + PPC64_STVS(59) + PPC64_STVS(60) + PPC64_STVS(61) + PPC64_STVS(62) + PPC64_STVS(63) + +#else + +// store FP register +#define PPC64_STF(n) \ + stfd n, (PPC64_OFFS_FP + n * 16)(3) + + // save float registers + PPC64_STF(0) + PPC64_STF(1) + PPC64_STF(2) + PPC64_STF(3) + PPC64_STF(4) + PPC64_STF(5) + PPC64_STF(6) + PPC64_STF(7) + PPC64_STF(8) + PPC64_STF(9) + PPC64_STF(10) + PPC64_STF(11) + PPC64_STF(12) + PPC64_STF(13) + PPC64_STF(14) + PPC64_STF(15) + PPC64_STF(16) + PPC64_STF(17) + PPC64_STF(18) + PPC64_STF(19) + PPC64_STF(20) + PPC64_STF(21) + PPC64_STF(22) + PPC64_STF(23) + PPC64_STF(24) + PPC64_STF(25) + PPC64_STF(26) + PPC64_STF(27) + PPC64_STF(28) + PPC64_STF(29) + PPC64_STF(30) + PPC64_STF(31) + +#if defined(__ALTIVEC__) + // save vector registers + + // Use 16-bytes below the stack pointer as an + // aligned buffer to save each vector register. + // Note that the stack pointer is always 16-byte aligned. 
+ subi 4, 1, 16 + +#define PPC64_STV_UNALIGNED(n) \ + stvx n, 0, 4 ;\ + ld 5, 0(4) ;\ + std 5, (PPC64_OFFS_V + n * 16)(3) ;\ + ld 5, 8(4) ;\ + std 5, (PPC64_OFFS_V + n * 16 + 8)(3) + + PPC64_STV_UNALIGNED(0) + PPC64_STV_UNALIGNED(1) + PPC64_STV_UNALIGNED(2) + PPC64_STV_UNALIGNED(3) + PPC64_STV_UNALIGNED(4) + PPC64_STV_UNALIGNED(5) + PPC64_STV_UNALIGNED(6) + PPC64_STV_UNALIGNED(7) + PPC64_STV_UNALIGNED(8) + PPC64_STV_UNALIGNED(9) + PPC64_STV_UNALIGNED(10) + PPC64_STV_UNALIGNED(11) + PPC64_STV_UNALIGNED(12) + PPC64_STV_UNALIGNED(13) + PPC64_STV_UNALIGNED(14) + PPC64_STV_UNALIGNED(15) + PPC64_STV_UNALIGNED(16) + PPC64_STV_UNALIGNED(17) + PPC64_STV_UNALIGNED(18) + PPC64_STV_UNALIGNED(19) + PPC64_STV_UNALIGNED(20) + PPC64_STV_UNALIGNED(21) + PPC64_STV_UNALIGNED(22) + PPC64_STV_UNALIGNED(23) + PPC64_STV_UNALIGNED(24) + PPC64_STV_UNALIGNED(25) + PPC64_STV_UNALIGNED(26) + PPC64_STV_UNALIGNED(27) + PPC64_STV_UNALIGNED(28) + PPC64_STV_UNALIGNED(29) + PPC64_STV_UNALIGNED(30) + PPC64_STV_UNALIGNED(31) + +#endif +#endif + + li 3, 0 // return UNW_ESUCCESS + blr + + +#elif defined(__powerpc__) + +// +// extern int unw_getcontext(unw_context_t* thread_state) +// +// On entry: +// thread_state pointer is in r3 +// +#if defined(_AIX) +DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext) +#else +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) +#endif + stw 0, 8(3) + mflr 0 + stw 0, 0(3) // store lr as ssr0 + stw 1, 12(3) + stw 4, 24(3) // Save r4 first since it will be used for fixing r2. +#if defined(_AIX) + // The TOC register (r2) was changed by the glue code if unw_getcontext + // is called from a different module. Save the original TOC register + // in the context if this is the case. + mflr 4 + lwz 4, 0(4) // Get the instruction at the return address. + xoris 0, 4, 0x8041 // Is it reloading the TOC register "lwz 2,20(1)"? + cmplwi 0, 0x14 + bne 0, LnoR2Fix // No need to fix up r2 if it is not. + lwz 2, 20(1) // Use the saved TOC register in the stack. +LnoR2Fix: +#endif + stw 2, 16(3) + stw 3, 20(3) + stw 5, 28(3) + stw 6, 32(3) + stw 7, 36(3) + stw 8, 40(3) + stw 9, 44(3) + stw 10, 48(3) + stw 11, 52(3) + stw 12, 56(3) + stw 13, 60(3) + stw 14, 64(3) + stw 15, 68(3) + stw 16, 72(3) + stw 17, 76(3) + stw 18, 80(3) + stw 19, 84(3) + stw 20, 88(3) + stw 21, 92(3) + stw 22, 96(3) + stw 23,100(3) + stw 24,104(3) + stw 25,108(3) + stw 26,112(3) + stw 27,116(3) + stw 28,120(3) + stw 29,124(3) + stw 30,128(3) + stw 31,132(3) + +#if defined(__ALTIVEC__) + // save VRSave register + mfspr 0, 256 + stw 0, 156(3) +#endif + // save CR registers + mfcr 0 + stw 0, 136(3) +#if defined(_AIX) + // LR value from the register is not used, initialize it to 0. 
+ li 0, 0 + stw 0, 144(3) +#endif + // save CTR register + mfctr 0 + stw 0, 148(3) + +#if !defined(__NO_FPRS__) + // save float registers + stfd 0, 160(3) + stfd 1, 168(3) + stfd 2, 176(3) + stfd 3, 184(3) + stfd 4, 192(3) + stfd 5, 200(3) + stfd 6, 208(3) + stfd 7, 216(3) + stfd 8, 224(3) + stfd 9, 232(3) + stfd 10,240(3) + stfd 11,248(3) + stfd 12,256(3) + stfd 13,264(3) + stfd 14,272(3) + stfd 15,280(3) + stfd 16,288(3) + stfd 17,296(3) + stfd 18,304(3) + stfd 19,312(3) + stfd 20,320(3) + stfd 21,328(3) + stfd 22,336(3) + stfd 23,344(3) + stfd 24,352(3) + stfd 25,360(3) + stfd 26,368(3) + stfd 27,376(3) + stfd 28,384(3) + stfd 29,392(3) + stfd 30,400(3) + stfd 31,408(3) +#endif + +#if defined(__ALTIVEC__) + // save vector registers + + subi 4, 1, 16 + rlwinm 4, 4, 0, 0, 27 // mask low 4-bits + // r4 is now a 16-byte aligned pointer into the red zone + +#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \ + stvx _vec, 0, 4 SEPARATOR \ + lwz 5, 0(4) SEPARATOR \ + stw 5, _offset(3) SEPARATOR \ + lwz 5, 4(4) SEPARATOR \ + stw 5, _offset+4(3) SEPARATOR \ + lwz 5, 8(4) SEPARATOR \ + stw 5, _offset+8(3) SEPARATOR \ + lwz 5, 12(4) SEPARATOR \ + stw 5, _offset+12(3) + + SAVE_VECTOR_UNALIGNED( 0, 424+0x000) + SAVE_VECTOR_UNALIGNED( 1, 424+0x010) + SAVE_VECTOR_UNALIGNED( 2, 424+0x020) + SAVE_VECTOR_UNALIGNED( 3, 424+0x030) + SAVE_VECTOR_UNALIGNED( 4, 424+0x040) + SAVE_VECTOR_UNALIGNED( 5, 424+0x050) + SAVE_VECTOR_UNALIGNED( 6, 424+0x060) + SAVE_VECTOR_UNALIGNED( 7, 424+0x070) + SAVE_VECTOR_UNALIGNED( 8, 424+0x080) + SAVE_VECTOR_UNALIGNED( 9, 424+0x090) + SAVE_VECTOR_UNALIGNED(10, 424+0x0A0) + SAVE_VECTOR_UNALIGNED(11, 424+0x0B0) + SAVE_VECTOR_UNALIGNED(12, 424+0x0C0) + SAVE_VECTOR_UNALIGNED(13, 424+0x0D0) + SAVE_VECTOR_UNALIGNED(14, 424+0x0E0) + SAVE_VECTOR_UNALIGNED(15, 424+0x0F0) + SAVE_VECTOR_UNALIGNED(16, 424+0x100) + SAVE_VECTOR_UNALIGNED(17, 424+0x110) + SAVE_VECTOR_UNALIGNED(18, 424+0x120) + SAVE_VECTOR_UNALIGNED(19, 424+0x130) + SAVE_VECTOR_UNALIGNED(20, 424+0x140) + SAVE_VECTOR_UNALIGNED(21, 424+0x150) + SAVE_VECTOR_UNALIGNED(22, 424+0x160) + SAVE_VECTOR_UNALIGNED(23, 424+0x170) + SAVE_VECTOR_UNALIGNED(24, 424+0x180) + SAVE_VECTOR_UNALIGNED(25, 424+0x190) + SAVE_VECTOR_UNALIGNED(26, 424+0x1A0) + SAVE_VECTOR_UNALIGNED(27, 424+0x1B0) + SAVE_VECTOR_UNALIGNED(28, 424+0x1C0) + SAVE_VECTOR_UNALIGNED(29, 424+0x1D0) + SAVE_VECTOR_UNALIGNED(30, 424+0x1E0) + SAVE_VECTOR_UNALIGNED(31, 424+0x1F0) +#endif + + li 3, 0 // return UNW_ESUCCESS + blr + + +#elif defined(__aarch64__) + +// +// extern int __unw_getcontext(unw_context_t* thread_state) +// +// On entry: +// thread_state pointer is in x0 +// + .p2align 2 +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + stp x0, x1, [x0, #0x000] + stp x2, x3, [x0, #0x010] + stp x4, x5, [x0, #0x020] + stp x6, x7, [x0, #0x030] + stp x8, x9, [x0, #0x040] + stp x10,x11, [x0, #0x050] + stp x12,x13, [x0, #0x060] + stp x14,x15, [x0, #0x070] + stp x16,x17, [x0, #0x080] + stp x18,x19, [x0, #0x090] + stp x20,x21, [x0, #0x0A0] + stp x22,x23, [x0, #0x0B0] + stp x24,x25, [x0, #0x0C0] + stp x26,x27, [x0, #0x0D0] + stp x28,x29, [x0, #0x0E0] + str x30, [x0, #0x0F0] + mov x1,sp + str x1, [x0, #0x0F8] + str x30, [x0, #0x100] // store return address as pc + // skip cpsr + stp d0, d1, [x0, #0x110] + stp d2, d3, [x0, #0x120] + stp d4, d5, [x0, #0x130] + stp d6, d7, [x0, #0x140] + stp d8, d9, [x0, #0x150] + stp d10,d11, [x0, #0x160] + stp d12,d13, [x0, #0x170] + stp d14,d15, [x0, #0x180] + stp d16,d17, [x0, #0x190] + stp d18,d19, [x0, #0x1A0] + stp d20,d21, [x0, #0x1B0] + stp d22,d23, [x0, 
#0x1C0] + stp d24,d25, [x0, #0x1D0] + stp d26,d27, [x0, #0x1E0] + stp d28,d29, [x0, #0x1F0] + str d30, [x0, #0x200] + str d31, [x0, #0x208] + mov x0, #0 // return UNW_ESUCCESS + ret + +#elif defined(__arm__) && !defined(__APPLE__) + +#if !defined(__ARM_ARCH_ISA_ARM) +#if (__ARM_ARCH_ISA_THUMB == 2) + .syntax unified +#endif + .thumb +#endif + +@ +@ extern int __unw_getcontext(unw_context_t* thread_state) +@ +@ On entry: +@ thread_state pointer is in r0 +@ +@ Per EHABI #4.7 this only saves the core integer registers. +@ EHABI #7.4.5 notes that in general all VRS registers should be restored +@ however this is very hard to do for VFP registers because it is unknown +@ to the library how many registers are implemented by the architecture. +@ Instead, VFP registers are demand saved by logic external to __unw_getcontext. +@ + .p2align 2 +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) +#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1 + stm r0!, {r0-r7} + mov r1, r8 + mov r2, r9 + mov r3, r10 + stm r0!, {r1-r3} + mov r1, r11 + mov r2, sp + mov r3, lr + str r1, [r0, #0] @ r11 + @ r12 does not need storing, it is the intra-procedure-call scratch register + str r2, [r0, #8] @ sp + str r3, [r0, #12] @ lr + str r3, [r0, #16] @ store return address as pc + @ T1 does not have a non-cpsr-clobbering register-zeroing instruction. + @ It is safe to use here though because we are about to return, and cpsr is + @ not expected to be preserved. + movs r0, #0 @ return UNW_ESUCCESS +#else + @ 32bit thumb-2 restrictions for stm: + @ . the sp (r13) cannot be in the list + @ . the pc (r15) cannot be in the list in an STM instruction + stm r0, {r0-r12} + str sp, [r0, #52] + str lr, [r0, #56] + str lr, [r0, #60] @ store return address as pc + mov r0, #0 @ return UNW_ESUCCESS +#endif + JMP(lr) + +@ +@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .fpu vfpv3-d16 +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv) + vstmia r0, {d0-d15} + JMP(lr) + +@ +@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .fpu vfpv3-d16 +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv) + vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia + JMP(lr) + +@ +@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .fpu vfpv3 +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv) + @ VFP and iwMMX instructions are only available when compiling with the flags + @ that enable them. We do not want to do that in the library (because we do not + @ want the compiler to generate instructions that access those) but this is + @ only accessed if the personality routine needs these registers. Use of + @ these registers implies they are, actually, available on the target, so + @ it's ok to execute. + @ So, generate the instructions using the corresponding coprocessor mnemonic. 
+ vstmia r0, {d16-d31} + JMP(lr) + +#if defined(_LIBUNWIND_ARM_WMMX) + +@ +@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .arch armv5te +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv) + stcl p1, cr0, [r0], #8 @ wstrd wR0, [r0], #8 + stcl p1, cr1, [r0], #8 @ wstrd wR1, [r0], #8 + stcl p1, cr2, [r0], #8 @ wstrd wR2, [r0], #8 + stcl p1, cr3, [r0], #8 @ wstrd wR3, [r0], #8 + stcl p1, cr4, [r0], #8 @ wstrd wR4, [r0], #8 + stcl p1, cr5, [r0], #8 @ wstrd wR5, [r0], #8 + stcl p1, cr6, [r0], #8 @ wstrd wR6, [r0], #8 + stcl p1, cr7, [r0], #8 @ wstrd wR7, [r0], #8 + stcl p1, cr8, [r0], #8 @ wstrd wR8, [r0], #8 + stcl p1, cr9, [r0], #8 @ wstrd wR9, [r0], #8 + stcl p1, cr10, [r0], #8 @ wstrd wR10, [r0], #8 + stcl p1, cr11, [r0], #8 @ wstrd wR11, [r0], #8 + stcl p1, cr12, [r0], #8 @ wstrd wR12, [r0], #8 + stcl p1, cr13, [r0], #8 @ wstrd wR13, [r0], #8 + stcl p1, cr14, [r0], #8 @ wstrd wR14, [r0], #8 + stcl p1, cr15, [r0], #8 @ wstrd wR15, [r0], #8 + JMP(lr) + +@ +@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values) +@ +@ On entry: +@ values pointer is in r0 +@ + .p2align 2 +#if defined(__ELF__) + .arch armv5te +#endif +DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj) + stc2 p1, cr8, [r0], #4 @ wstrw wCGR0, [r0], #4 + stc2 p1, cr9, [r0], #4 @ wstrw wCGR1, [r0], #4 + stc2 p1, cr10, [r0], #4 @ wstrw wCGR2, [r0], #4 + stc2 p1, cr11, [r0], #4 @ wstrw wCGR3, [r0], #4 + JMP(lr) + +#endif + +#elif defined(__or1k__) + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# thread_state pointer is in r3 +# +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + l.sw 0(r3), r0 + l.sw 4(r3), r1 + l.sw 8(r3), r2 + l.sw 12(r3), r3 + l.sw 16(r3), r4 + l.sw 20(r3), r5 + l.sw 24(r3), r6 + l.sw 28(r3), r7 + l.sw 32(r3), r8 + l.sw 36(r3), r9 + l.sw 40(r3), r10 + l.sw 44(r3), r11 + l.sw 48(r3), r12 + l.sw 52(r3), r13 + l.sw 56(r3), r14 + l.sw 60(r3), r15 + l.sw 64(r3), r16 + l.sw 68(r3), r17 + l.sw 72(r3), r18 + l.sw 76(r3), r19 + l.sw 80(r3), r20 + l.sw 84(r3), r21 + l.sw 88(r3), r22 + l.sw 92(r3), r23 + l.sw 96(r3), r24 + l.sw 100(r3), r25 + l.sw 104(r3), r26 + l.sw 108(r3), r27 + l.sw 112(r3), r28 + l.sw 116(r3), r29 + l.sw 120(r3), r30 + l.sw 124(r3), r31 + # store ra to pc + l.sw 128(r3), r9 + # zero epcr + l.sw 132(r3), r0 + +#elif defined(__hexagon__) +# +# extern int unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# thread_state pointer is in r0 +# +#define OFFSET(offset) (offset/4) +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + memw(r0+#32) = r8 + memw(r0+#36) = r9 + memw(r0+#40) = r10 + memw(r0+#44) = r11 + + memw(r0+#48) = r12 + memw(r0+#52) = r13 + memw(r0+#56) = r14 + memw(r0+#60) = r15 + + memw(r0+#64) = r16 + memw(r0+#68) = r17 + memw(r0+#72) = r18 + memw(r0+#76) = r19 + + memw(r0+#80) = r20 + memw(r0+#84) = r21 + memw(r0+#88) = r22 + memw(r0+#92) = r23 + + memw(r0+#96) = r24 + memw(r0+#100) = r25 + memw(r0+#104) = r26 + memw(r0+#108) = r27 + + memw(r0+#112) = r28 + memw(r0+#116) = r29 + memw(r0+#120) = r30 + memw(r0+#124) = r31 + r1 = c4 // Predicate register + memw(r0+#128) = r1 + r1 = memw(r30) // *FP == Saved FP + r1 = r31 + memw(r0+#132) = r1 + + jumpr r31 + +#elif defined(__sparc__) && defined(__arch64__) + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# thread_state pointer is in %o0 +# +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + .register %g2, 
#scratch + .register %g3, #scratch + .register %g6, #scratch + .register %g7, #scratch + stx %g1, [%o0 + 0x08] + stx %g2, [%o0 + 0x10] + stx %g3, [%o0 + 0x18] + stx %g4, [%o0 + 0x20] + stx %g5, [%o0 + 0x28] + stx %g6, [%o0 + 0x30] + stx %g7, [%o0 + 0x38] + stx %o0, [%o0 + 0x40] + stx %o1, [%o0 + 0x48] + stx %o2, [%o0 + 0x50] + stx %o3, [%o0 + 0x58] + stx %o4, [%o0 + 0x60] + stx %o5, [%o0 + 0x68] + stx %o6, [%o0 + 0x70] + stx %o7, [%o0 + 0x78] + stx %l0, [%o0 + 0x80] + stx %l1, [%o0 + 0x88] + stx %l2, [%o0 + 0x90] + stx %l3, [%o0 + 0x98] + stx %l4, [%o0 + 0xa0] + stx %l5, [%o0 + 0xa8] + stx %l6, [%o0 + 0xb0] + stx %l7, [%o0 + 0xb8] + stx %i0, [%o0 + 0xc0] + stx %i1, [%o0 + 0xc8] + stx %i2, [%o0 + 0xd0] + stx %i3, [%o0 + 0xd8] + stx %i4, [%o0 + 0xe0] + stx %i5, [%o0 + 0xe8] + stx %i6, [%o0 + 0xf0] + stx %i7, [%o0 + 0xf8] + + # save StackGhost cookie + mov %i7, %g4 + save %sp, -176, %sp + # register window flush necessary even without StackGhost + flushw + restore + ldx [%sp + 2047 + 0x78], %g5 + xor %g4, %g5, %g4 + stx %g4, [%o0 + 0x100] + retl + # return UNW_ESUCCESS + clr %o0 + +#elif defined(__sparc__) + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# thread_state pointer is in o0 +# +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + ta 3 + add %o7, 8, %o7 + std %g0, [%o0 + 0] + std %g2, [%o0 + 8] + std %g4, [%o0 + 16] + std %g6, [%o0 + 24] + std %o0, [%o0 + 32] + std %o2, [%o0 + 40] + std %o4, [%o0 + 48] + std %o6, [%o0 + 56] + std %l0, [%o0 + 64] + std %l2, [%o0 + 72] + std %l4, [%o0 + 80] + std %l6, [%o0 + 88] + std %i0, [%o0 + 96] + std %i2, [%o0 + 104] + std %i4, [%o0 + 112] + std %i6, [%o0 + 120] + jmp %o7 + clr %o0 // return UNW_ESUCCESS + +#elif defined(__riscv) + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# thread_state pointer is in a0 +# +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + ISTORE x1, (RISCV_ISIZE * 0)(a0) // store ra as pc +#if defined(__riscv_32e) + .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +#else + .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 +#endif + ISTORE x\i, (RISCV_ISIZE * \i)(a0) + .endr + +# if defined(__riscv_flen) + .irp i,FROM_0_TO_31 + FSTORE f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0) + .endr +# endif + + li a0, 0 // return UNW_ESUCCESS + ret // jump to ra + +#elif defined(__s390x__) + +// +// extern int __unw_getcontext(unw_context_t* thread_state) +// +// On entry: +// thread_state pointer is in r2 +// +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + + // Save GPRs + stmg %r0, %r15, 16(%r2) + + // Save PSWM + epsw %r0, %r1 + stm %r0, %r1, 0(%r2) + + // Store return address as PSWA + stg %r14, 8(%r2) + + // Save FPRs + .irp i,FROM_0_TO_15 + std %f\i, (144+8*\i)(%r2) + .endr + + // Return UNW_ESUCCESS + lghi %r2, 0 + br %r14 + +#elif defined(__loongarch__) && __loongarch_grlen == 64 + +# +# extern int __unw_getcontext(unw_context_t* thread_state) +# +# On entry: +# thread_state pointer is in $a0($r4) +# +DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) + .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + st.d $r\i, $a0, (8*\i) + .endr + st.d $r1, $a0, (8 * 32) // store $ra to pc + +# if __loongarch_frlen == 64 + .irp i,FROM_0_TO_31 + fst.d $f\i, $a0, (8 * 33 + 8 * \i) + .endr +# endif + + move $a0, $zero // UNW_ESUCCESS + jr $ra + +#endif + + WEAK_ALIAS(__unw_getcontext, unw_getcontext) + +#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */ + +NO_EXEC_STACK_DIRECTIVE diff --git 
a/third_party/libunwind/assembly.h b/third_party/libunwind/assembly.h new file mode 100644 index 000000000..f8e83e138 --- /dev/null +++ b/third_party/libunwind/assembly.h @@ -0,0 +1,303 @@ +/* ===-- assembly.h - libUnwind assembler support macros -------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + * ===----------------------------------------------------------------------=== + * + * This file defines macros for use in libUnwind assembler source. + * This file is not part of the interface of this library. + * + * ===----------------------------------------------------------------------=== + */ + +#ifndef UNWIND_ASSEMBLY_H +#define UNWIND_ASSEMBLY_H + +#if defined(__linux__) && defined(__CET__) +#include +#define _LIBUNWIND_CET_ENDBR _CET_ENDBR +#else +#define _LIBUNWIND_CET_ENDBR +#endif + +#if defined(__powerpc64__) +#define SEPARATOR ; +#define PPC64_OFFS_SRR0 0 +#define PPC64_OFFS_CR 272 +#define PPC64_OFFS_XER 280 +#define PPC64_OFFS_LR 288 +#define PPC64_OFFS_CTR 296 +#define PPC64_OFFS_VRSAVE 304 +#define PPC64_OFFS_FP 312 +#define PPC64_OFFS_V 824 +#elif defined(__APPLE__) && defined(__aarch64__) +#define SEPARATOR %% +#elif defined(__riscv) +# define RISCV_ISIZE (__riscv_xlen / 8) +# define RISCV_FOFFSET (RISCV_ISIZE * 32) +# if defined(__riscv_flen) +# define RISCV_FSIZE (__riscv_flen / 8) +# endif + +# if __riscv_xlen == 64 +# define ILOAD ld +# define ISTORE sd +# elif __riscv_xlen == 32 +# define ILOAD lw +# define ISTORE sw +# else +# error "Unsupported __riscv_xlen" +# endif + +# if defined(__riscv_flen) +# if __riscv_flen == 64 +# define FLOAD fld +# define FSTORE fsd +# elif __riscv_flen == 32 +# define FLOAD flw +# define FSTORE fsw +# else +# error "Unsupported __riscv_flen" +# endif +# endif +# define SEPARATOR ; +#else +#define SEPARATOR ; +#endif + +#if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1) && \ + !defined(_AIX) +#define PPC64_OPD1 .section .opd,"aw",@progbits SEPARATOR +#define PPC64_OPD2 SEPARATOR \ + .p2align 3 SEPARATOR \ + .quad .Lfunc_begin0 SEPARATOR \ + .quad .TOC.@tocbase SEPARATOR \ + .quad 0 SEPARATOR \ + .text SEPARATOR \ +.Lfunc_begin0: +#else +#define PPC64_OPD1 +#define PPC64_OPD2 +#endif + +#if defined(__aarch64__) +#if defined(__ARM_FEATURE_GCS_DEFAULT) && defined(__ARM_FEATURE_BTI_DEFAULT) +// Set BTI, PAC, and GCS gnu property bits +#define GNU_PROPERTY 7 +// We indirectly branch to __libunwind_Registers_arm64_jumpto from +// __unw_phase2_resume, so we need to use bti jc. 
+#define AARCH64_BTI bti jc +#elif defined(__ARM_FEATURE_GCS_DEFAULT) +// Set GCS gnu property bit +#define GNU_PROPERTY 4 +#elif defined(__ARM_FEATURE_BTI_DEFAULT) +// Set BTI and PAC gnu property bits +#define GNU_PROPERTY 3 +#define AARCH64_BTI bti c +#endif +#ifdef GNU_PROPERTY + .pushsection ".note.gnu.property", "a" SEPARATOR \ + .balign 8 SEPARATOR \ + .long 4 SEPARATOR \ + .long 0x10 SEPARATOR \ + .long 0x5 SEPARATOR \ + .asciz "GNU" SEPARATOR \ + .long 0xc0000000 SEPARATOR /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */ \ + .long 4 SEPARATOR \ + .long GNU_PROPERTY SEPARATOR \ + .long 0 SEPARATOR \ + .popsection SEPARATOR +#endif +#endif +#if !defined(AARCH64_BTI) +#define AARCH64_BTI +#endif + +#if !defined(__aarch64__) +#ifdef __ARM_FEATURE_PAC_DEFAULT + .eabi_attribute Tag_PAC_extension, 2 + .eabi_attribute Tag_PACRET_use, 1 +#endif +#ifdef __ARM_FEATURE_BTI_DEFAULT + .eabi_attribute Tag_BTI_extension, 1 + .eabi_attribute Tag_BTI_use, 1 +#endif +#endif + +#define GLUE2(a, b) a ## b +#define GLUE(a, b) GLUE2(a, b) +#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name) + +#if defined(__APPLE__) + +#define SYMBOL_IS_FUNC(name) +#define HIDDEN_SYMBOL(name) .private_extern name +#if defined(_LIBUNWIND_HIDE_SYMBOLS) +#define EXPORT_SYMBOL(name) HIDDEN_SYMBOL(name) +#else +#define EXPORT_SYMBOL(name) +#endif +#define WEAK_ALIAS(name, aliasname) \ + .globl SYMBOL_NAME(aliasname) SEPARATOR \ + EXPORT_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR \ + SYMBOL_NAME(aliasname) = SYMBOL_NAME(name) + +#define NO_EXEC_STACK_DIRECTIVE + +#elif defined(__ELF__) + +#if defined(__arm__) +#define SYMBOL_IS_FUNC(name) .type name,%function +#else +#define SYMBOL_IS_FUNC(name) .type name,@function +#endif +#define HIDDEN_SYMBOL(name) .hidden name +#if defined(_LIBUNWIND_HIDE_SYMBOLS) +#define EXPORT_SYMBOL(name) HIDDEN_SYMBOL(name) +#else +#define EXPORT_SYMBOL(name) +#endif +#define WEAK_SYMBOL(name) .weak name + +#if defined(__hexagon__) +#define WEAK_ALIAS(name, aliasname) \ + EXPORT_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR \ + WEAK_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR \ + .equiv SYMBOL_NAME(aliasname), SYMBOL_NAME(name) +#else +#define WEAK_ALIAS(name, aliasname) \ + EXPORT_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR \ + WEAK_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR \ + SYMBOL_NAME(aliasname) = SYMBOL_NAME(name) +#endif + +#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \ + defined(__linux__) +#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits +#else +#define NO_EXEC_STACK_DIRECTIVE +#endif + +#elif defined(_WIN32) + +#define SYMBOL_IS_FUNC(name) \ + .def name SEPARATOR \ + .scl 2 SEPARATOR \ + .type 32 SEPARATOR \ + .endef +#define EXPORT_SYMBOL2(name) \ + .section .drectve,"yn" SEPARATOR \ + .ascii "-export:", #name, "\0" SEPARATOR \ + .text +#if defined(_LIBUNWIND_HIDE_SYMBOLS) +#define EXPORT_SYMBOL(name) +#else +#define EXPORT_SYMBOL(name) EXPORT_SYMBOL2(name) +#endif +#define HIDDEN_SYMBOL(name) + +#if defined(__MINGW32__) +#define WEAK_ALIAS(name, aliasname) \ + .globl SYMBOL_NAME(aliasname) SEPARATOR \ + EXPORT_SYMBOL(aliasname) SEPARATOR \ + SYMBOL_NAME(aliasname) = SYMBOL_NAME(name) +#else +#define WEAK_ALIAS3(name, aliasname) \ + .section .drectve,"yn" SEPARATOR \ + .ascii "-alternatename:", #aliasname, "=", #name, "\0" SEPARATOR \ + .text +#define WEAK_ALIAS2(name, aliasname) \ + WEAK_ALIAS3(name, aliasname) +#define WEAK_ALIAS(name, aliasname) \ + EXPORT_SYMBOL(SYMBOL_NAME(aliasname)) SEPARATOR \ + WEAK_ALIAS2(SYMBOL_NAME(name), 
SYMBOL_NAME(aliasname)) +#endif + +#define NO_EXEC_STACK_DIRECTIVE + +#elif defined(__sparc__) + +#elif defined(_AIX) + +#if defined(__powerpc64__) +#define VBYTE_LEN 8 +#define CSECT_ALIGN 3 +#else +#define VBYTE_LEN 4 +#define CSECT_ALIGN 2 +#endif + +// clang-format off +#define DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(name, aliasname) \ + .csect .text[PR], 2 SEPARATOR \ + .csect .name[PR], 2 SEPARATOR \ + .globl name[DS] SEPARATOR \ + .globl .name[PR] SEPARATOR \ + .align 4 SEPARATOR \ + .csect name[DS], CSECT_ALIGN SEPARATOR \ +aliasname: \ + .vbyte VBYTE_LEN, .name[PR] SEPARATOR \ + .vbyte VBYTE_LEN, TOC[TC0] SEPARATOR \ + .vbyte VBYTE_LEN, 0 SEPARATOR \ + .weak aliasname SEPARATOR \ + .weak .aliasname SEPARATOR \ + .csect .name[PR], 2 SEPARATOR \ +.aliasname: \ + +#define WEAK_ALIAS(name, aliasname) +#define NO_EXEC_STACK_DIRECTIVE + +// clang-format on +#else + +#error Unsupported target + +#endif + +#if defined(_AIX) + // clang-format off +#define DEFINE_LIBUNWIND_FUNCTION(name) \ + .globl name[DS] SEPARATOR \ + .globl .name SEPARATOR \ + .align 4 SEPARATOR \ + .csect name[DS], CSECT_ALIGN SEPARATOR \ + .vbyte VBYTE_LEN, .name SEPARATOR \ + .vbyte VBYTE_LEN, TOC[TC0] SEPARATOR \ + .vbyte VBYTE_LEN, 0 SEPARATOR \ + .csect .text[PR], 2 SEPARATOR \ +.name: + // clang-format on +#else +#define DEFINE_LIBUNWIND_FUNCTION(name) \ + .globl SYMBOL_NAME(name) SEPARATOR \ + HIDDEN_SYMBOL(SYMBOL_NAME(name)) SEPARATOR \ + SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \ + PPC64_OPD1 \ + SYMBOL_NAME(name): \ + PPC64_OPD2 \ + AARCH64_BTI +#endif + +#if defined(__arm__) +#if !defined(__ARM_ARCH) +#define __ARM_ARCH 4 +#endif + +#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5 +#define ARM_HAS_BX +#endif + +#ifdef ARM_HAS_BX +#define JMP(r) bx r +#else +#define JMP(r) mov pc, r +#endif +#endif /* __arm__ */ + +#if defined(__powerpc__) +#define PPC_LEFT_SHIFT(index) << (index) +#endif + +#endif /* UNWIND_ASSEMBLY_H */ diff --git a/third_party/libunwind/libunwind.cc b/third_party/libunwind/libunwind.cc index 4f58a27a0..053b972a7 100644 --- a/third_party/libunwind/libunwind.cc +++ b/third_party/libunwind/libunwind.cc @@ -321,7 +321,7 @@ void __unw_remove_dynamic_fde(unw_word_t fde) { void __unw_add_dynamic_eh_frame_section(unw_word_t eh_frame_start) { // The eh_frame section start serves as the mh_group unw_word_t mh_group = eh_frame_start; - CFI_Parser::CIE_Info cieInfo; + CFI_Parser::CIE_Info cieInfo = {}; CFI_Parser::FDE_Info fdeInfo; auto p = (LocalAddressSpace::pint_t)eh_frame_start; while (true) { diff --git a/third_party/openmp/BUILD.mk b/third_party/openmp/BUILD.mk index a916aa22d..7e6dde1f1 100644 --- a/third_party/openmp/BUILD.mk +++ b/third_party/openmp/BUILD.mk @@ -33,6 +33,8 @@ THIRD_PARTY_OPENMP_A_DIRECTDEPS = \ THIRD_PARTY_COMPILER_RT \ THIRD_PARTY_GDTOA \ THIRD_PARTY_LIBCXX \ + THIRD_PARTY_LIBCXXABI \ + THIRD_PARTY_LIBUNWIND \ THIRD_PARTY_NSYNC \ THIRD_PARTY_MUSL diff --git a/third_party/smallz4/BUILD.mk b/third_party/smallz4/BUILD.mk index ecc92c5a3..c4e38d8e7 100644 --- a/third_party/smallz4/BUILD.mk +++ b/third_party/smallz4/BUILD.mk @@ -36,7 +36,9 @@ THIRD_PARTY_SMALLZ4_A_DIRECTDEPS = \ LIBC_CALLS \ LIBC_STDIO \ LIBC_STR \ - THIRD_PARTY_LIBCXX + THIRD_PARTY_LIBCXX \ + THIRD_PARTY_LIBCXXABI \ + THIRD_PARTY_LIBUNWIND \ THIRD_PARTY_SMALLZ4_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_SMALLZ4_A_DIRECTDEPS),$($(x)))) diff --git a/tool/cosmocc/README.md b/tool/cosmocc/README.md index 532cdcf83..9674754dc 100644 --- a/tool/cosmocc/README.md +++ b/tool/cosmocc/README.md @@ -9,13 +9,13 @@ 
diff --git a/tool/cosmocc/README.md b/tool/cosmocc/README.md
index 532cdcf83..9674754dc 100644
--- a/tool/cosmocc/README.md
+++ b/tool/cosmocc/README.md
@@ -9,13 +9,13 @@
 reach a broader audience from the platform(s) of your choosing.
 
 ## What's Included
 
-This toolchain bundles GCC 14.1.0, Cosmopolitan Libc, LLVM LIBCXX, LLVM
-compiler-rt, and LLVM OpenMP. Additional libraries were provided by Musl
-Libc, and the venerable BSDs OSes. This lets you benefit from the
-awesome modern GCC compiler with the strongest GPL barrier possible. The
-preprocessor advertises cross compilers as both `__COSMOCC__` and
-`__COSMOPOLITAN__` whereas `cosmocc` additionally defines
-`__FATCOSMOCC__`.
+This toolchain bundles GCC 14.1.0, Clang 19, Cosmopolitan Libc, LLVM
+LIBCXX, LLVM compiler-rt, and LLVM OpenMP. Additional libraries were
+provided by Musl Libc, and the venerable BSDs OSes. This lets you
+benefit from the awesome modern GCC compiler with the strongest GPL
+barrier possible. The preprocessor advertises cross compilers as both
+`__COSMOCC__` and `__COSMOPOLITAN__` whereas `cosmocc` additionally
+defines `__FATCOSMOCC__`.
 
 ## Getting Started
@@ -153,6 +153,14 @@ The following supplemental flags are defined by cosmocc:
   Including `cosmo.h` has a similar effect, however it's recommended
   that any program that uses cosmo-specific APIs pass this flag.
 
+- `-mclang` (experimental) may be passed to the `cosmocc` command to use
+  Clang instead of GCC under the hood. This can help C++ code compile 3x
+  faster.
+
+- `-mgcc` may be passed to the `cosmocc` command to use GCC instead of
+  Clang under the hood. Since this is the default mode, this flag may be
+  used to override the effect of passing the `-mclang` flag earlier.
+
 - `-mdbg` may be passed when linking programs. It has the same effect
   as `export MODE=dbg` in that it will cause an alternative build of the
   Cosmopolitan Libc runtime to be linked that was built with `-O0 -g`.
@@ -417,7 +425,7 @@ statements instead, so that Cosmopolitan Libc's system constants will
 work as expected. Our modifications to GNU GCC are published under the
 ISC license at . The binaries you see here were first published at
- which
+ which
 is regularly updated.
 
 ## Legal
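The `-mclang` and `-mgcc` options documented above are selectors for the wrapper's compiler backend. As a sketch of the intended usage (the source file name is illustrative):

```sh
# Compile the same file with either backend. -mclang picks the bundled
# cosmo-clang (experimental, faster C++ builds); -mgcc restores the GCC
# default and cancels an earlier -mclang on the command line.
cosmocc -mclang -O2 -c hello.c -o hello.o
cosmocc -mgcc   -O2 -c hello.c -o hello.o
```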
-d "$TMPDIR" ]; then fi fi -CLANG=0 -CC_X86_64="$BIN/x86_64-linux-cosmo-gcc" -CC_AARCH64="$BIN/aarch64-linux-cosmo-gcc" -CXX_X86_64="$BIN/x86_64-linux-cosmo-g++" -CXX_AARCH64="$BIN/aarch64-linux-cosmo-g++" -FPORTCOSMO="-fportcosmo" -TARGET_X86_64= -TARGET_AARCH64= +use_gcc() { + CLANG=0 + CC_X86_64="$BIN/x86_64-linux-cosmo-gcc" + CC_AARCH64="$BIN/aarch64-linux-cosmo-gcc" + CXX_X86_64="$BIN/x86_64-linux-cosmo-g++" + CXX_AARCH64="$BIN/aarch64-linux-cosmo-g++" + TARGET_X86_64= + TARGET_AARCH64= + FPORTCOSMO="-fportcosmo" + FNO_INLINE_FUNCTIONS_CALLED_ONCE="-fno-inline-functions-called-once" +} + +use_clang() { + CLANG=1 + CC_X86_64="$BIN/cosmo-clang" + CC_AARCH64="$BIN/cosmo-clang" + CXX_X86_64="$BIN/cosmo-clang" + CXX_AARCH64="$BIN/cosmo-clang" + TARGET_X86_64="--target=x86_64" + TARGET_AARCH64="--target=aarch64" + FPORTCOSMO= + FNO_INLINE_FUNCTIONS_CALLED_ONCE="-fno-inline-functions-called-once" +} + +use_gcc X= OPT= @@ -196,14 +213,10 @@ EOF MODE=optlinux continue elif [ x"$x" = x"-mclang" ]; then - CLANG=1 - CC_X86_64="$BIN/cosmo-clang" - CC_AARCH64="$BIN/cosmo-clang" - CXX_X86_64="$BIN/cosmo-clang++" - CXX_AARCH64="$BIN/cosmo-clang++" - TARGET_X86_64="--target=x86_64" - TARGET_AARCH64="--target=aarch64" - FPORTCOSMO= + use_clang + continue + elif [ x"$x" = x"-mgcc" ]; then + use_gcc continue elif [ x"$x" = x"-m64" ]; then continue @@ -315,10 +328,14 @@ elif [ -n "$OUTPUT" ] && [ $INPUT_FILE_COUNT -gt 1 ]; then fi fi +if [ $INTENT = ld ]; then + use_gcc +fi + PLATFORM="-D__COSMOPOLITAN__ -D__COSMOCC__ -D__FATCOSMOCC__" PREDEF="-include libc/integral/normalize.inc" CPPFLAGS="-fno-pie -nostdinc -isystem $BIN/../include" -CFLAGS="$FPORTCOSMO -fno-dwarf2-cfi-asm -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-semantic-interposition" +CFLAGS="$FPORTCOSMO -fno-semantic-interposition" LDFLAGS="-static -nostdlib -no-pie -fuse-ld=bfd -Wl,-z,noexecstack -Wl,-z,norelro -Wl,--gc-sections" PRECIOUS="-fno-omit-frame-pointer" @@ -345,9 +362,6 @@ fi if [ $CPLUSPLUS -eq 1 ]; then CC_X86_64=$CXX_X86_64 CC_AARCH64=$CXX_AARCH64 - if [ $INTENT != cpp ]; then - CFLAGS="$CFLAGS -fno-rtti -fno-exceptions -fuse-cxa-atexit" - fi CPPFLAGS="-isystem $BIN/../include/third_party/libcxx $CPPFLAGS" else CFLAGS="$CFLAGS -Wno-implicit-int" @@ -386,8 +400,8 @@ if [ x"$MODE" = x"optlinux" ]; then fi if [ x"$OPT" != x"-Os" ] && [ x"$MODE" != x"tiny" ] && [ x"$MODE" != x"optlinux" ]; then - CFLAGS_X86_64="${CFLAGS_X86_64} -fpatchable-function-entry=18,16 -fno-inline-functions-called-once -DFTRACE -DSYSDEBUG" - CFLAGS_AARCH64="${CFLAGS_AARCH64} -fpatchable-function-entry=7,6 -fno-inline-functions-called-once -DFTRACE -DSYSDEBUG" + CFLAGS_X86_64="${CFLAGS_X86_64} -fpatchable-function-entry=18,16 $FNO_INLINE_FUNCTIONS_CALLED_ONCE -DFTRACE -DSYSDEBUG" + CFLAGS_AARCH64="${CFLAGS_AARCH64} -fpatchable-function-entry=7,6 $FNO_INLINE_FUNCTIONS_CALLED_ONCE -DFTRACE -DSYSDEBUG" fi if [ $CPLUSPLUS -eq 1 ]; then diff --git a/tool/cosmocc/bin/cosmocross b/tool/cosmocc/bin/cosmocross index 65aa487ea..357239920 100755 --- a/tool/cosmocc/bin/cosmocross +++ b/tool/cosmocc/bin/cosmocross @@ -47,7 +47,7 @@ log_command() { ORIGINAL="$0 $*" PLATFORM="-D__COSMOPOLITAN__ -D__COSMOCC__" PREDEF="-include libc/integral/normalize.inc" -CFLAGS="-fportcosmo -fno-dwarf2-cfi-asm -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-semantic-interposition" +CFLAGS="-fportcosmo -fno-semantic-interposition" CPPFLAGS="-fno-pie -nostdinc -isystem $BIN/../include" LDFLAGS="-static -no-pie -nostdlib -fuse-ld=bfd -Wl,-z,noexecstack" 
APEFLAGS="-Wl,--gc-sections" @@ -109,7 +109,6 @@ else fi if [ $CPLUSPLUS -eq 1 ]; then CC="$BIN/$ARCH-linux-cosmo-g++" - CFLAGS="$CFLAGS -fno-rtti -fno-exceptions -fuse-cxa-atexit" CPPFLAGS="-isystem $BIN/../include/third_party/libcxx $CPPFLAGS" LDLIBS="-lcxx $LDLIBS" else diff --git a/tool/cosmocc/package.sh b/tool/cosmocc/package.sh index 207b3ca7a..245253ecb 100755 --- a/tool/cosmocc/package.sh +++ b/tool/cosmocc/package.sh @@ -102,8 +102,6 @@ make -j$NPROC m=$ARM64 \ o/$ARM64/ape/ape.elf \ o/$ARM64/ape/aarch64.lds \ o/$ARM64/libc/crt/crt.o \ - o/$ARM64/ape/ape-copy-self.o \ - o/$ARM64/ape/ape-no-modify-self.o \ o/$ARM64/cosmopolitan.a \ o/$ARM64/third_party/libcxx/libcxx.a \ o/$ARM64/tool/build/assimilate.dbg \ @@ -136,8 +134,6 @@ make -j$NPROC m=$ARM64-tiny \ o/$ARM64-tiny/ape/ape.elf \ o/$ARM64-tiny/ape/aarch64.lds \ o/$ARM64-tiny/libc/crt/crt.o \ - o/$ARM64-tiny/ape/ape-copy-self.o \ - o/$ARM64-tiny/ape/ape-no-modify-self.o \ o/$ARM64-tiny/cosmopolitan.a \ o/$ARM64-tiny/third_party/libcxx/libcxx.a \ @@ -145,8 +141,6 @@ make -j$NPROC m=$ARM64-dbg \ o/$ARM64-dbg/ape/ape.elf \ o/$ARM64-dbg/ape/aarch64.lds \ o/$ARM64-dbg/libc/crt/crt.o \ - o/$ARM64-dbg/ape/ape-copy-self.o \ - o/$ARM64-dbg/ape/ape-no-modify-self.o \ o/$ARM64-dbg/cosmopolitan.a \ o/$ARM64-dbg/third_party/libcxx/libcxx.a \ @@ -154,8 +148,6 @@ make -j$NPROC m=$ARM64-optlinux \ o/$ARM64-optlinux/ape/ape.elf \ o/$ARM64-optlinux/ape/aarch64.lds \ o/$ARM64-optlinux/libc/crt/crt.o \ - o/$ARM64-optlinux/ape/ape-copy-self.o \ - o/$ARM64-optlinux/ape/ape-no-modify-self.o \ o/$ARM64-optlinux/cosmopolitan.a \ o/$ARM64-optlinux/third_party/libcxx/libcxx.a \ @@ -182,17 +174,18 @@ fetch() { OLD=$PWD cd "$OUTDIR/" if [ ! -x bin/x86_64-linux-cosmo-gcc ]; then - fetch https://github.com/ahgamut/superconfigure/releases/download/z0.0.53/aarch64-gcc.zip & - fetch https://github.com/ahgamut/superconfigure/releases/download/z0.0.53/x86_64-gcc.zip & - fetch https://github.com/ahgamut/superconfigure/releases/download/z0.0.53/llvm.zip & + fetch https://github.com/ahgamut/superconfigure/releases/download/z0.0.54/aarch64-gcc.zip & + fetch https://github.com/ahgamut/superconfigure/releases/download/z0.0.54/x86_64-gcc.zip & + fetch https://github.com/ahgamut/superconfigure/releases/download/z0.0.54/llvm.zip & wait unzip aarch64-gcc.zip & unzip x86_64-gcc.zip & - unzip llvm.zip bin/clang-18 & + unzip llvm.zip bin/clang-19 & wait rm -f aarch64-gcc.zip rm -f x86_64-gcc.zip - mv bin/clang-18 bin/cosmo-clang + rm -f llvm.zip + mv bin/clang-19 bin/cosmo-clang fi rm -f bin/*-cpp rm -f bin/*-gcc-*