Mirror of https://github.com/jart/cosmopolitan.git (synced 2025-07-07 19:58:30 +00:00).
Upgrade to Cosmopolitan GCC 11.2.0 for x86_64
This commit is contained in:
parent
682b74ed88
commit
39f20dbb13
137 changed files with 48523 additions and 34001 deletions
457
third_party/intel/gfniintrin.internal.h
vendored
457
third_party/intel/gfniintrin.internal.h
vendored
|
@ -1,311 +1,310 @@
|
|||
/* clang-format off */
/*
 * GFNI (Galois Field New Instructions) intrinsics, vendored from GCC 11.2.0
 * <gfniintrin.h> for Cosmopolitan. Must be reached via <immintrin.h> only.
 *
 * Each ISA tier (SSE2 / AVX / AVX512VL / AVX512VL+BW / AVX512F+BW) is wrapped
 * in push_options/target pragmas so the intrinsics compile even when the
 * translation unit is not built with -mgfni; the matching __DISABLE_* macro
 * pops the options afterwards. Immediate-operand intrinsics get real inline
 * functions under __OPTIMIZE__ and macro forms otherwise, because the builtin
 * requires a compile-time-constant immediate.
 */
#if defined(__x86_64__) && !(__ASSEMBLER__ + __LINKER__ + 0)
#ifndef _IMMINTRIN_H_INCLUDED
#error "Never use <gfniintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef _GFNIINTRIN_H_INCLUDED
#define _GFNIINTRIN_H_INCLUDED

#if !defined(__GFNI__) || !defined(__SSE2__)
#pragma GCC push_options
#pragma GCC target("gfni,sse2")
#define __DISABLE_GFNI__
#endif /* __GFNI__ */

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_gf2p8mul_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
						   (__v16qi) __B);
}

#ifdef __OPTIMIZE__
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_gf2p8affineinv_epi64_epi8 (__m128i __A, __m128i __B, const int __C)
{
  return (__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi ((__v16qi) __A,
							   (__v16qi) __B,
							   __C);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_gf2p8affine_epi64_epi8 (__m128i __A, __m128i __B, const int __C)
{
  return (__m128i) __builtin_ia32_vgf2p8affineqb_v16qi ((__v16qi) __A,
							(__v16qi) __B, __C);
}
#else
/* Macro forms: __C must be an immediate, so no inline function without
   constant propagation (-O0).  */
#define _mm_gf2p8affineinv_epi64_epi8(A, B, C) ((__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), (__v16qi)(__m128i)(B), (int)(C)))
#define _mm_gf2p8affine_epi64_epi8(A, B, C) ((__m128i) __builtin_ia32_vgf2p8affineqb_v16qi ((__v16qi)(__m128i)(A), (__v16qi)(__m128i)(B), (int)(C)))
#endif

#ifdef __DISABLE_GFNI__
#undef __DISABLE_GFNI__
#pragma GCC pop_options
#endif /* __DISABLE_GFNI__ */

#if !defined(__GFNI__) || !defined(__AVX__)
#pragma GCC push_options
#pragma GCC target("gfni,avx")
#define __DISABLE_GFNIAVX__
#endif /* __GFNIAVX__ */

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_gf2p8mul_epi8 (__m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi ((__v32qi) __A,
						    (__v32qi) __B);
}

#ifdef __OPTIMIZE__
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_gf2p8affineinv_epi64_epi8 (__m256i __A, __m256i __B, const int __C)
{
  return (__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi ((__v32qi) __A,
							   (__v32qi) __B,
							   __C);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_gf2p8affine_epi64_epi8 (__m256i __A, __m256i __B, const int __C)
{
  return (__m256i) __builtin_ia32_vgf2p8affineqb_v32qi ((__v32qi) __A,
							(__v32qi) __B, __C);
}
#else
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, C) ((__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), (__v32qi)(__m256i)(B), (int)(C)))
#define _mm256_gf2p8affine_epi64_epi8(A, B, C) ((__m256i) __builtin_ia32_vgf2p8affineqb_v32qi ((__v32qi)(__m256i)(A), ( __v32qi)(__m256i)(B), (int)(C)))
#endif

#ifdef __DISABLE_GFNIAVX__
#undef __DISABLE_GFNIAVX__
#pragma GCC pop_options
#endif /* __GFNIAVX__ */

#if !defined(__GFNI__) || !defined(__AVX512VL__)
#pragma GCC push_options
#pragma GCC target("gfni,avx512vl")
#define __DISABLE_GFNIAVX512VL__
#endif /* __GFNIAVX512VL__ */

/* Masked variants: result lanes where the mask bit is clear come from __A
   (merge-masking) or are zeroed (maskz).  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_gf2p8mul_epi8 (__m128i __A, __mmask16 __B, __m128i __C, __m128i __D)
{
  return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi_mask ((__v16qi) __C,
							 (__v16qi) __D,
							 (__v16qi)__A, __B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_gf2p8mul_epi8 (__mmask16 __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi_mask ((__v16qi) __B,
			(__v16qi) __C, (__v16qi) _mm_setzero_si128 (), __A);
}

#ifdef __OPTIMIZE__
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_gf2p8affineinv_epi64_epi8 (__m128i __A, __mmask16 __B, __m128i __C,
				    __m128i __D, const int __E)
{
  return (__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask ((__v16qi) __C,
								(__v16qi) __D,
								__E,
								(__v16qi)__A,
								__B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_gf2p8affineinv_epi64_epi8 (__mmask16 __A, __m128i __B, __m128i __C,
				     const int __D)
{
  return (__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask ((__v16qi) __B,
						(__v16qi) __C, __D,
						(__v16qi) _mm_setzero_si128 (),
						__A);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_gf2p8affine_epi64_epi8 (__m128i __A, __mmask16 __B, __m128i __C,
				 __m128i __D, const int __E)
{
  return (__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask ((__v16qi) __C,
					(__v16qi) __D, __E, (__v16qi)__A, __B);
}

extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_gf2p8affine_epi64_epi8 (__mmask16 __A, __m128i __B, __m128i __C,
				  const int __D)
{
  return (__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask ((__v16qi) __B,
		     (__v16qi) __C, __D, (__v16qi) _mm_setzero_si128 (), __A);
}
#else
#define _mm_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) ((__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask( (__v16qi)(__m128i)(C), (__v16qi)(__m128i)(D), (int)(E), (__v16qi)(__m128i)(A), (__mmask16)(B)))
#define _mm_maskz_gf2p8affineinv_epi64_epi8(A, B, C, D) ((__m128i) __builtin_ia32_vgf2p8affineinvqb_v16qi_mask( (__v16qi)(__m128i)(B), (__v16qi)(__m128i)(C), (int)(D), (__v16qi)(__m128i) _mm_setzero_si128 (), (__mmask16)(A)))
#define _mm_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) ((__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask((__v16qi)(__m128i)(C), (__v16qi)(__m128i)(D), (int)(E), (__v16qi)(__m128i)(A), (__mmask16)(B)))
#define _mm_maskz_gf2p8affine_epi64_epi8(A, B, C, D) ((__m128i) __builtin_ia32_vgf2p8affineqb_v16qi_mask((__v16qi)(__m128i)(B), (__v16qi)(__m128i)(C), (int)(D), (__v16qi)(__m128i) _mm_setzero_si128 (), (__mmask16)(A)))
#endif

#ifdef __DISABLE_GFNIAVX512VL__
#undef __DISABLE_GFNIAVX512VL__
#pragma GCC pop_options
#endif /* __GFNIAVX512VL__ */

#if !defined(__GFNI__) || !defined(__AVX512VL__) || !defined(__AVX512BW__)
#pragma GCC push_options
#pragma GCC target("gfni,avx512vl,avx512bw")
#define __DISABLE_GFNIAVX512VLBW__
#endif /* __GFNIAVX512VLBW__ */

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_gf2p8mul_epi8 (__m256i __A, __mmask32 __B, __m256i __C,
			   __m256i __D)
{
  return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi_mask ((__v32qi) __C,
							 (__v32qi) __D,
							 (__v32qi)__A, __B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_gf2p8mul_epi8 (__mmask32 __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi_mask ((__v32qi) __B,
		     (__v32qi) __C, (__v32qi) _mm256_setzero_si256 (), __A);
}

#ifdef __OPTIMIZE__
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_gf2p8affineinv_epi64_epi8 (__m256i __A, __mmask32 __B,
				       __m256i __C, __m256i __D, const int __E)
{
  return (__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask ((__v32qi) __C,
								(__v32qi) __D,
								__E,
								(__v32qi)__A,
								__B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_gf2p8affineinv_epi64_epi8 (__mmask32 __A, __m256i __B,
					__m256i __C, const int __D)
{
  return (__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask ((__v32qi) __B,
				       (__v32qi) __C, __D,
				       (__v32qi) _mm256_setzero_si256 (), __A);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_gf2p8affine_epi64_epi8 (__m256i __A, __mmask32 __B, __m256i __C,
				    __m256i __D, const int __E)
{
  return (__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask ((__v32qi) __C,
							     (__v32qi) __D,
							     __E,
							     (__v32qi)__A,
							     __B);
}

extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_gf2p8affine_epi64_epi8 (__mmask32 __A, __m256i __B,
				     __m256i __C, const int __D)
{
  return (__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask ((__v32qi) __B,
		  (__v32qi) __C, __D, (__v32qi)_mm256_setzero_si256 (), __A);
}
#else
#define _mm256_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) ((__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask( (__v32qi)(__m256i)(C), (__v32qi)(__m256i)(D), (int)(E), (__v32qi)(__m256i)(A), (__mmask32)(B)))
#define _mm256_maskz_gf2p8affineinv_epi64_epi8(A, B, C, D) ((__m256i) __builtin_ia32_vgf2p8affineinvqb_v32qi_mask( (__v32qi)(__m256i)(B), (__v32qi)(__m256i)(C), (int)(D), (__v32qi)(__m256i) _mm256_setzero_si256 (), (__mmask32)(A)))
#define _mm256_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) ((__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask((__v32qi)(__m256i)(C), (__v32qi)(__m256i)(D), (int)(E), (__v32qi)(__m256i)(A), (__mmask32)(B)))
#define _mm256_maskz_gf2p8affine_epi64_epi8(A, B, C, D) ((__m256i) __builtin_ia32_vgf2p8affineqb_v32qi_mask((__v32qi)(__m256i)(B), (__v32qi)(__m256i)(C), (int)(D), (__v32qi)(__m256i) _mm256_setzero_si256 (), (__mmask32)(A)))
#endif

#ifdef __DISABLE_GFNIAVX512VLBW__
#undef __DISABLE_GFNIAVX512VLBW__
#pragma GCC pop_options
#endif /* __GFNIAVX512VLBW__ */

#if !defined(__GFNI__) || !defined(__AVX512F__) || !defined(__AVX512BW__)
#pragma GCC push_options
#pragma GCC target("gfni,avx512f,avx512bw")
#define __DISABLE_GFNIAVX512FBW__
#endif /* __GFNIAVX512FBW__ */

extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_gf2p8mul_epi8 (__m512i __A, __mmask64 __B, __m512i __C,
			   __m512i __D)
{
  return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi_mask ((__v64qi) __C,
				      (__v64qi) __D, (__v64qi)__A, __B);
}

extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_gf2p8mul_epi8 (__mmask64 __A, __m512i __B, __m512i __C)
{
  return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi_mask ((__v64qi) __B,
		     (__v64qi) __C, (__v64qi) _mm512_setzero_si512 (), __A);
}

extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_gf2p8mul_epi8 (__m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi ((__v64qi) __A,
						    (__v64qi) __B);
}

#ifdef __OPTIMIZE__
extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_gf2p8affineinv_epi64_epi8 (__m512i __A, __mmask64 __B, __m512i __C,
				       __m512i __D, const int __E)
{
  return (__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask ((__v64qi) __C,
								(__v64qi) __D,
								__E,
								(__v64qi)__A,
								__B);
}

extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_gf2p8affineinv_epi64_epi8 (__mmask64 __A, __m512i __B,
					__m512i __C, const int __D)
{
  return (__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask ((__v64qi) __B,
				       (__v64qi) __C, __D,
				       (__v64qi) _mm512_setzero_si512 (), __A);
}

extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_gf2p8affineinv_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
{
  return (__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ((__v64qi) __A,
							   (__v64qi) __B, __C);
}

extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_mask_gf2p8affine_epi64_epi8 (__m512i __A, __mmask64 __B, __m512i __C,
				    __m512i __D, const int __E)
{
  return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask ((__v64qi) __C,
					(__v64qi) __D, __E, (__v64qi)__A, __B);
}

extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_maskz_gf2p8affine_epi64_epi8 (__mmask64 __A, __m512i __B, __m512i __C,
				     const int __D)
{
  return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask ((__v64qi) __B,
		   (__v64qi) __C, __D, (__v64qi) _mm512_setzero_si512 (), __A);
}

extern __inline __m512i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm512_gf2p8affine_epi64_epi8 (__m512i __A, __m512i __B, const int __C)
{
  return (__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi) __A,
							(__v64qi) __B, __C);
}
#else
#define _mm512_mask_gf2p8affineinv_epi64_epi8(A, B, C, D, E) ((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask( (__v64qi)(__m512i)(C), (__v64qi)(__m512i)(D), (int)(E), (__v64qi)(__m512i)(A), (__mmask64)(B)))
#define _mm512_maskz_gf2p8affineinv_epi64_epi8(A, B, C, D) ((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi_mask( (__v64qi)(__m512i)(B), (__v64qi)(__m512i)(C), (int)(D), (__v64qi)(__m512i) _mm512_setzero_si512 (), (__mmask64)(A)))
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, C) ((__m512i) __builtin_ia32_vgf2p8affineinvqb_v64qi ( (__v64qi)(__m512i)(A), (__v64qi)(__m512i)(B), (int)(C)))
#define _mm512_mask_gf2p8affine_epi64_epi8(A, B, C, D, E) ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask((__v64qi)(__m512i)(C), (__v64qi)(__m512i)(D), (int)(E), (__v64qi)(__m512i)(A), (__mmask64)(B)))
#define _mm512_maskz_gf2p8affine_epi64_epi8(A, B, C, D) ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi_mask((__v64qi)(__m512i)(B), (__v64qi)(__m512i)(C), (int)(D), (__v64qi)(__m512i) _mm512_setzero_si512 (), (__mmask64)(A)))
#define _mm512_gf2p8affine_epi64_epi8(A, B, C) ((__m512i) __builtin_ia32_vgf2p8affineqb_v64qi ((__v64qi)(__m512i)(A), (__v64qi)(__m512i)(B), (int)(C)))
#endif

#ifdef __DISABLE_GFNIAVX512FBW__
#undef __DISABLE_GFNIAVX512FBW__
#pragma GCC pop_options
#endif /* __GFNIAVX512FBW__ */

#endif /* _GFNIINTRIN_H_INCLUDED */
#endif /* __x86_64__ */
|
|
Loading…
Add table
Add a link
Reference in a new issue